a9c2b6a900
Do vector right shift operation for small int types only after loads
Reviewed-by: jrose, dlong
4985 lines
162 KiB
Plaintext
//
// Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

// X86 Common Architecture Description File

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.

register %{
|
|
//----------Architecture Description Register Definitions----------------------
|
|
// General Registers
|
|
// "reg_def" name ( register save type, C convention save type,
|
|
// ideal register type, encoding );
|
|
// Register Save Types:
|
|
//
|
|
// NS = No-Save: The register allocator assumes that these registers
|
|
// can be used without saving upon entry to the method, &
|
|
// that they do not need to be saved at call sites.
|
|
//
|
|
// SOC = Save-On-Call: The register allocator assumes that these registers
|
|
// can be used without saving upon entry to the method,
|
|
// but that they must be saved at call sites.
|
|
//
|
|
// SOE = Save-On-Entry: The register allocator assumes that these registers
|
|
// must be saved before using them upon entry to the
|
|
// method, but they do not need to be saved at call
|
|
// sites.
|
|
//
|
|
// AS = Always-Save: The register allocator assumes that these registers
|
|
// must be saved before using them upon entry to the
|
|
// method, & that they must be saved at call sites.
|
|
//
|
|
// Ideal Register Type is used to determine how to save & restore a
|
|
// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
|
|
// spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
|
|
//
|
|
// The encoding number is the actual bit-pattern placed into the opcodes.
|
|
|
|
// XMM registers. 256-bit registers of 8 words each, labeled (a)-h.
|
|
// Word a in each register holds a Float, words ab hold a Double.
|
|
// The whole registers are used in SSE4.2 version intrinsics,
|
|
// array copy stubs and superword operations (see UseSSE42Intrinsics,
|
|
// UseXMMForArrayCopy and UseSuperword flags).
|
|
// XMM8-XMM15 must be encoded with REX (VEX for UseAVX).
|
|
// Linux ABI: No register preserved across function calls
|
|
// XMM0-XMM7 might hold parameters
|
|
// Windows ABI: XMM6-XMM15 preserved across function calls
|
|
// XMM0-XMM3 might hold parameters
|
|
|
|
reg_def XMM0 ( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg());
|
|
reg_def XMM0b( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(1));
|
|
reg_def XMM0c( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(2));
|
|
reg_def XMM0d( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(3));
|
|
reg_def XMM0e( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(4));
|
|
reg_def XMM0f( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(5));
|
|
reg_def XMM0g( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(6));
|
|
reg_def XMM0h( SOC, SOC, Op_RegF, 0, xmm0->as_VMReg()->next(7));
|
|
|
|
reg_def XMM1 ( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg());
|
|
reg_def XMM1b( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(1));
|
|
reg_def XMM1c( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(2));
|
|
reg_def XMM1d( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(3));
|
|
reg_def XMM1e( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(4));
|
|
reg_def XMM1f( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(5));
|
|
reg_def XMM1g( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(6));
|
|
reg_def XMM1h( SOC, SOC, Op_RegF, 1, xmm1->as_VMReg()->next(7));
|
|
|
|
reg_def XMM2 ( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg());
|
|
reg_def XMM2b( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(1));
|
|
reg_def XMM2c( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(2));
|
|
reg_def XMM2d( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(3));
|
|
reg_def XMM2e( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(4));
|
|
reg_def XMM2f( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(5));
|
|
reg_def XMM2g( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(6));
|
|
reg_def XMM2h( SOC, SOC, Op_RegF, 2, xmm2->as_VMReg()->next(7));
|
|
|
|
reg_def XMM3 ( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg());
|
|
reg_def XMM3b( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(1));
|
|
reg_def XMM3c( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(2));
|
|
reg_def XMM3d( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(3));
|
|
reg_def XMM3e( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(4));
|
|
reg_def XMM3f( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(5));
|
|
reg_def XMM3g( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(6));
|
|
reg_def XMM3h( SOC, SOC, Op_RegF, 3, xmm3->as_VMReg()->next(7));
|
|
|
|
reg_def XMM4 ( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg());
|
|
reg_def XMM4b( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(1));
|
|
reg_def XMM4c( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(2));
|
|
reg_def XMM4d( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(3));
|
|
reg_def XMM4e( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(4));
|
|
reg_def XMM4f( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(5));
|
|
reg_def XMM4g( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(6));
|
|
reg_def XMM4h( SOC, SOC, Op_RegF, 4, xmm4->as_VMReg()->next(7));
|
|
|
|
reg_def XMM5 ( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg());
|
|
reg_def XMM5b( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(1));
|
|
reg_def XMM5c( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(2));
|
|
reg_def XMM5d( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(3));
|
|
reg_def XMM5e( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(4));
|
|
reg_def XMM5f( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(5));
|
|
reg_def XMM5g( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(6));
|
|
reg_def XMM5h( SOC, SOC, Op_RegF, 5, xmm5->as_VMReg()->next(7));
|
|
|
|
#ifdef _WIN64
|
|
|
|
reg_def XMM6 ( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg());
|
|
reg_def XMM6b( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(1));
|
|
reg_def XMM6c( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(2));
|
|
reg_def XMM6d( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(3));
|
|
reg_def XMM6e( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(4));
|
|
reg_def XMM6f( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(5));
|
|
reg_def XMM6g( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(6));
|
|
reg_def XMM6h( SOC, SOE, Op_RegF, 6, xmm6->as_VMReg()->next(7));
|
|
|
|
reg_def XMM7 ( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg());
|
|
reg_def XMM7b( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(1));
|
|
reg_def XMM7c( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(2));
|
|
reg_def XMM7d( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(3));
|
|
reg_def XMM7e( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(4));
|
|
reg_def XMM7f( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(5));
|
|
reg_def XMM7g( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(6));
|
|
reg_def XMM7h( SOC, SOE, Op_RegF, 7, xmm7->as_VMReg()->next(7));
|
|
|
|
reg_def XMM8 ( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg());
|
|
reg_def XMM8b( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(1));
|
|
reg_def XMM8c( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(2));
|
|
reg_def XMM8d( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(3));
|
|
reg_def XMM8e( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(4));
|
|
reg_def XMM8f( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(5));
|
|
reg_def XMM8g( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(6));
|
|
reg_def XMM8h( SOC, SOE, Op_RegF, 8, xmm8->as_VMReg()->next(7));
|
|
|
|
reg_def XMM9 ( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg());
|
|
reg_def XMM9b( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(1));
|
|
reg_def XMM9c( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(2));
|
|
reg_def XMM9d( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(3));
|
|
reg_def XMM9e( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(4));
|
|
reg_def XMM9f( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(5));
|
|
reg_def XMM9g( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(6));
|
|
reg_def XMM9h( SOC, SOE, Op_RegF, 9, xmm9->as_VMReg()->next(7));
|
|
|
|
reg_def XMM10 ( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg());
|
|
reg_def XMM10b( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(1));
|
|
reg_def XMM10c( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(2));
|
|
reg_def XMM10d( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(3));
|
|
reg_def XMM10e( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(4));
|
|
reg_def XMM10f( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(5));
|
|
reg_def XMM10g( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(6));
|
|
reg_def XMM10h( SOC, SOE, Op_RegF, 10, xmm10->as_VMReg()->next(7));
|
|
|
|
reg_def XMM11 ( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg());
|
|
reg_def XMM11b( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(1));
|
|
reg_def XMM11c( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(2));
|
|
reg_def XMM11d( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(3));
|
|
reg_def XMM11e( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(4));
|
|
reg_def XMM11f( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(5));
|
|
reg_def XMM11g( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(6));
|
|
reg_def XMM11h( SOC, SOE, Op_RegF, 11, xmm11->as_VMReg()->next(7));
|
|
|
|
reg_def XMM12 ( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg());
|
|
reg_def XMM12b( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(1));
|
|
reg_def XMM12c( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(2));
|
|
reg_def XMM12d( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(3));
|
|
reg_def XMM12e( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(4));
|
|
reg_def XMM12f( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(5));
|
|
reg_def XMM12g( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(6));
|
|
reg_def XMM12h( SOC, SOE, Op_RegF, 12, xmm12->as_VMReg()->next(7));
|
|
|
|
reg_def XMM13 ( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg());
|
|
reg_def XMM13b( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(1));
|
|
reg_def XMM13c( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(2));
|
|
reg_def XMM13d( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(3));
|
|
reg_def XMM13e( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(4));
|
|
reg_def XMM13f( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(5));
|
|
reg_def XMM13g( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(6));
|
|
reg_def XMM13h( SOC, SOE, Op_RegF, 13, xmm13->as_VMReg()->next(7));
|
|
|
|
reg_def XMM14 ( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg());
|
|
reg_def XMM14b( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(1));
|
|
reg_def XMM14c( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(2));
|
|
reg_def XMM14d( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(3));
|
|
reg_def XMM14e( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(4));
|
|
reg_def XMM14f( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(5));
|
|
reg_def XMM14g( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(6));
|
|
reg_def XMM14h( SOC, SOE, Op_RegF, 14, xmm14->as_VMReg()->next(7));
|
|
|
|
reg_def XMM15 ( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg());
|
|
reg_def XMM15b( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(1));
|
|
reg_def XMM15c( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(2));
|
|
reg_def XMM15d( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(3));
|
|
reg_def XMM15e( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(4));
|
|
reg_def XMM15f( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(5));
|
|
reg_def XMM15g( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(6));
|
|
reg_def XMM15h( SOC, SOE, Op_RegF, 15, xmm15->as_VMReg()->next(7));
|
|
|
|
#else // _WIN64
|
|
|
|
reg_def XMM6 ( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg());
|
|
reg_def XMM6b( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(1));
|
|
reg_def XMM6c( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(2));
|
|
reg_def XMM6d( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(3));
|
|
reg_def XMM6e( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(4));
|
|
reg_def XMM6f( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(5));
|
|
reg_def XMM6g( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(6));
|
|
reg_def XMM6h( SOC, SOC, Op_RegF, 6, xmm6->as_VMReg()->next(7));
|
|
|
|
reg_def XMM7 ( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg());
|
|
reg_def XMM7b( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(1));
|
|
reg_def XMM7c( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(2));
|
|
reg_def XMM7d( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(3));
|
|
reg_def XMM7e( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(4));
|
|
reg_def XMM7f( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(5));
|
|
reg_def XMM7g( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(6));
|
|
reg_def XMM7h( SOC, SOC, Op_RegF, 7, xmm7->as_VMReg()->next(7));
|
|
|
|
#ifdef _LP64
|
|
|
|
reg_def XMM8 ( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg());
|
|
reg_def XMM8b( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(1));
|
|
reg_def XMM8c( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(2));
|
|
reg_def XMM8d( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(3));
|
|
reg_def XMM8e( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(4));
|
|
reg_def XMM8f( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(5));
|
|
reg_def XMM8g( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(6));
|
|
reg_def XMM8h( SOC, SOC, Op_RegF, 8, xmm8->as_VMReg()->next(7));
|
|
|
|
reg_def XMM9 ( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg());
|
|
reg_def XMM9b( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(1));
|
|
reg_def XMM9c( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(2));
|
|
reg_def XMM9d( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(3));
|
|
reg_def XMM9e( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(4));
|
|
reg_def XMM9f( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(5));
|
|
reg_def XMM9g( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(6));
|
|
reg_def XMM9h( SOC, SOC, Op_RegF, 9, xmm9->as_VMReg()->next(7));
|
|
|
|
reg_def XMM10 ( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg());
|
|
reg_def XMM10b( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(1));
|
|
reg_def XMM10c( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(2));
|
|
reg_def XMM10d( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(3));
|
|
reg_def XMM10e( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(4));
|
|
reg_def XMM10f( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(5));
|
|
reg_def XMM10g( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(6));
|
|
reg_def XMM10h( SOC, SOC, Op_RegF, 10, xmm10->as_VMReg()->next(7));
|
|
|
|
reg_def XMM11 ( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg());
|
|
reg_def XMM11b( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(1));
|
|
reg_def XMM11c( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(2));
|
|
reg_def XMM11d( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(3));
|
|
reg_def XMM11e( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(4));
|
|
reg_def XMM11f( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(5));
|
|
reg_def XMM11g( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(6));
|
|
reg_def XMM11h( SOC, SOC, Op_RegF, 11, xmm11->as_VMReg()->next(7));
|
|
|
|
reg_def XMM12 ( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg());
|
|
reg_def XMM12b( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(1));
|
|
reg_def XMM12c( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(2));
|
|
reg_def XMM12d( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(3));
|
|
reg_def XMM12e( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(4));
|
|
reg_def XMM12f( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(5));
|
|
reg_def XMM12g( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(6));
|
|
reg_def XMM12h( SOC, SOC, Op_RegF, 12, xmm12->as_VMReg()->next(7));
|
|
|
|
reg_def XMM13 ( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg());
|
|
reg_def XMM13b( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(1));
|
|
reg_def XMM13c( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(2));
|
|
reg_def XMM13d( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(3));
|
|
reg_def XMM13e( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(4));
|
|
reg_def XMM13f( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(5));
|
|
reg_def XMM13g( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(6));
|
|
reg_def XMM13h( SOC, SOC, Op_RegF, 13, xmm13->as_VMReg()->next(7));
|
|
|
|
reg_def XMM14 ( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg());
|
|
reg_def XMM14b( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(1));
|
|
reg_def XMM14c( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(2));
|
|
reg_def XMM14d( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(3));
|
|
reg_def XMM14e( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(4));
|
|
reg_def XMM14f( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(5));
|
|
reg_def XMM14g( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(6));
|
|
reg_def XMM14h( SOC, SOC, Op_RegF, 14, xmm14->as_VMReg()->next(7));
|
|
|
|
reg_def XMM15 ( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg());
|
|
reg_def XMM15b( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(1));
|
|
reg_def XMM15c( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(2));
|
|
reg_def XMM15d( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(3));
|
|
reg_def XMM15e( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(4));
|
|
reg_def XMM15f( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(5));
|
|
reg_def XMM15g( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(6));
|
|
reg_def XMM15h( SOC, SOC, Op_RegF, 15, xmm15->as_VMReg()->next(7));
|
|
|
|
#endif // _LP64
|
|
|
|
#endif // _WIN64
|
|
|
|
#ifdef _LP64
|
|
reg_def RFLAGS(SOC, SOC, 0, 16, VMRegImpl::Bad());
|
|
#else
|
|
reg_def RFLAGS(SOC, SOC, 0, 8, VMRegImpl::Bad());
|
|
#endif // _LP64
|
|
|
|
alloc_class chunk1(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h,
|
|
XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h,
|
|
XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h,
|
|
XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h,
|
|
XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h,
|
|
XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h,
|
|
XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h,
|
|
XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h
|
|
#ifdef _LP64
|
|
,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h,
|
|
XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h,
|
|
XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h,
|
|
XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h,
|
|
XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h,
|
|
XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h,
|
|
XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h,
|
|
XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h
|
|
#endif
|
|
);
|
|
|
|
// flags allocation class should be last.
|
|
alloc_class chunk2(RFLAGS);
|
|
|
|
// Singleton class for condition codes
|
|
reg_class int_flags(RFLAGS);
|
|
|
|
// Class for all float registers
|
|
reg_class float_reg(XMM0,
|
|
XMM1,
|
|
XMM2,
|
|
XMM3,
|
|
XMM4,
|
|
XMM5,
|
|
XMM6,
|
|
XMM7
|
|
#ifdef _LP64
|
|
,XMM8,
|
|
XMM9,
|
|
XMM10,
|
|
XMM11,
|
|
XMM12,
|
|
XMM13,
|
|
XMM14,
|
|
XMM15
|
|
#endif
|
|
);
|
|
|
|
// Class for all double registers
|
|
reg_class double_reg(XMM0, XMM0b,
|
|
XMM1, XMM1b,
|
|
XMM2, XMM2b,
|
|
XMM3, XMM3b,
|
|
XMM4, XMM4b,
|
|
XMM5, XMM5b,
|
|
XMM6, XMM6b,
|
|
XMM7, XMM7b
|
|
#ifdef _LP64
|
|
,XMM8, XMM8b,
|
|
XMM9, XMM9b,
|
|
XMM10, XMM10b,
|
|
XMM11, XMM11b,
|
|
XMM12, XMM12b,
|
|
XMM13, XMM13b,
|
|
XMM14, XMM14b,
|
|
XMM15, XMM15b
|
|
#endif
|
|
);
|
|
|
|
// Class for all 32bit vector registers
|
|
reg_class vectors_reg(XMM0,
|
|
XMM1,
|
|
XMM2,
|
|
XMM3,
|
|
XMM4,
|
|
XMM5,
|
|
XMM6,
|
|
XMM7
|
|
#ifdef _LP64
|
|
,XMM8,
|
|
XMM9,
|
|
XMM10,
|
|
XMM11,
|
|
XMM12,
|
|
XMM13,
|
|
XMM14,
|
|
XMM15
|
|
#endif
|
|
);
|
|
|
|
// Class for all 64bit vector registers
|
|
reg_class vectord_reg(XMM0, XMM0b,
|
|
XMM1, XMM1b,
|
|
XMM2, XMM2b,
|
|
XMM3, XMM3b,
|
|
XMM4, XMM4b,
|
|
XMM5, XMM5b,
|
|
XMM6, XMM6b,
|
|
XMM7, XMM7b
|
|
#ifdef _LP64
|
|
,XMM8, XMM8b,
|
|
XMM9, XMM9b,
|
|
XMM10, XMM10b,
|
|
XMM11, XMM11b,
|
|
XMM12, XMM12b,
|
|
XMM13, XMM13b,
|
|
XMM14, XMM14b,
|
|
XMM15, XMM15b
|
|
#endif
|
|
);
|
|
|
|
// Class for all 128bit vector registers
|
|
reg_class vectorx_reg(XMM0, XMM0b, XMM0c, XMM0d,
|
|
XMM1, XMM1b, XMM1c, XMM1d,
|
|
XMM2, XMM2b, XMM2c, XMM2d,
|
|
XMM3, XMM3b, XMM3c, XMM3d,
|
|
XMM4, XMM4b, XMM4c, XMM4d,
|
|
XMM5, XMM5b, XMM5c, XMM5d,
|
|
XMM6, XMM6b, XMM6c, XMM6d,
|
|
XMM7, XMM7b, XMM7c, XMM7d
|
|
#ifdef _LP64
|
|
,XMM8, XMM8b, XMM8c, XMM8d,
|
|
XMM9, XMM9b, XMM9c, XMM9d,
|
|
XMM10, XMM10b, XMM10c, XMM10d,
|
|
XMM11, XMM11b, XMM11c, XMM11d,
|
|
XMM12, XMM12b, XMM12c, XMM12d,
|
|
XMM13, XMM13b, XMM13c, XMM13d,
|
|
XMM14, XMM14b, XMM14c, XMM14d,
|
|
XMM15, XMM15b, XMM15c, XMM15d
|
|
#endif
|
|
);
|
|
|
|
// Class for all 256bit vector registers
|
|
reg_class vectory_reg(XMM0, XMM0b, XMM0c, XMM0d, XMM0e, XMM0f, XMM0g, XMM0h,
|
|
XMM1, XMM1b, XMM1c, XMM1d, XMM1e, XMM1f, XMM1g, XMM1h,
|
|
XMM2, XMM2b, XMM2c, XMM2d, XMM2e, XMM2f, XMM2g, XMM2h,
|
|
XMM3, XMM3b, XMM3c, XMM3d, XMM3e, XMM3f, XMM3g, XMM3h,
|
|
XMM4, XMM4b, XMM4c, XMM4d, XMM4e, XMM4f, XMM4g, XMM4h,
|
|
XMM5, XMM5b, XMM5c, XMM5d, XMM5e, XMM5f, XMM5g, XMM5h,
|
|
XMM6, XMM6b, XMM6c, XMM6d, XMM6e, XMM6f, XMM6g, XMM6h,
|
|
XMM7, XMM7b, XMM7c, XMM7d, XMM7e, XMM7f, XMM7g, XMM7h
|
|
#ifdef _LP64
|
|
,XMM8, XMM8b, XMM8c, XMM8d, XMM8e, XMM8f, XMM8g, XMM8h,
|
|
XMM9, XMM9b, XMM9c, XMM9d, XMM9e, XMM9f, XMM9g, XMM9h,
|
|
XMM10, XMM10b, XMM10c, XMM10d, XMM10e, XMM10f, XMM10g, XMM10h,
|
|
XMM11, XMM11b, XMM11c, XMM11d, XMM11e, XMM11f, XMM11g, XMM11h,
|
|
XMM12, XMM12b, XMM12c, XMM12d, XMM12e, XMM12f, XMM12g, XMM12h,
|
|
XMM13, XMM13b, XMM13c, XMM13d, XMM13e, XMM13f, XMM13g, XMM13h,
|
|
XMM14, XMM14b, XMM14c, XMM14d, XMM14e, XMM14f, XMM14g, XMM14h,
|
|
XMM15, XMM15b, XMM15c, XMM15d, XMM15e, XMM15f, XMM15g, XMM15h
|
|
#endif
|
|
);
|
|
|
|
%}
|
|
|
|
source %{
|
|
// Float masks come from different places depending on platform.
|
|
#ifdef _LP64
|
|
static address float_signmask() { return StubRoutines::x86::float_sign_mask(); }
|
|
static address float_signflip() { return StubRoutines::x86::float_sign_flip(); }
|
|
static address double_signmask() { return StubRoutines::x86::double_sign_mask(); }
|
|
static address double_signflip() { return StubRoutines::x86::double_sign_flip(); }
|
|
#else
|
|
static address float_signmask() { return (address)float_signmask_pool; }
|
|
static address float_signflip() { return (address)float_signflip_pool; }
|
|
static address double_signmask() { return (address)double_signmask_pool; }
|
|
static address double_signflip() { return (address)double_signflip_pool; }
|
|
#endif
|
|
|
|
|
|
const bool Matcher::match_rule_supported(int opcode) {
|
|
if (!has_match_rule(opcode))
|
|
return false;
|
|
|
|
switch (opcode) {
|
|
case Op_PopCountI:
|
|
case Op_PopCountL:
|
|
if (!UsePopCountInstruction)
|
|
return false;
|
|
break;
|
|
case Op_MulVI:
|
|
if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX
|
|
return false;
|
|
break;
|
|
case Op_CompareAndSwapL:
|
|
#ifdef _LP64
|
|
case Op_CompareAndSwapP:
|
|
#endif
|
|
if (!VM_Version::supports_cx8())
|
|
return false;
|
|
break;
|
|
}
|
|
|
|
return true; // By default, match rules are supported.
|
|
}
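// Illustrative note (not part of the original sources): if the JVM runs with
// -XX:-UsePopCountInstruction, match_rule_supported(Op_PopCountI) returns false
// above, so the matcher will not use the PopCountI rules.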
|
|
|
|
// Max vector size in bytes. 0 if not supported.
|
|
const int Matcher::vector_width_in_bytes(BasicType bt) {
|
|
assert(is_java_primitive(bt), "only primitive type vectors");
|
|
if (UseSSE < 2) return 0;
|
|
// SSE2 supports 128bit vectors for all types.
|
|
// AVX2 supports 256bit vectors for all types.
|
|
int size = (UseAVX > 1) ? 32 : 16;
|
|
// AVX1 supports 256bit vectors only for FLOAT and DOUBLE.
|
|
if (UseAVX > 0 && (bt == T_FLOAT || bt == T_DOUBLE))
|
|
size = 32;
|
|
// Use flag to limit vector size.
|
|
size = MIN2(size,(int)MaxVectorSize);
|
|
// Minimum 2 values in vector (or 4 for bytes).
|
|
switch (bt) {
|
|
case T_DOUBLE:
|
|
case T_LONG:
|
|
if (size < 16) return 0;
|
|
case T_FLOAT:
|
|
case T_INT:
|
|
if (size < 8) return 0;
|
|
case T_BOOLEAN:
|
|
case T_BYTE:
|
|
case T_CHAR:
|
|
case T_SHORT:
|
|
if (size < 4) return 0;
|
|
break;
|
|
default:
|
|
ShouldNotReachHere();
|
|
}
|
|
return size;
|
|
}
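// Illustrative note (not part of the original sources): with UseAVX == 2 and
// MaxVectorSize == 32, vector_width_in_bytes(T_INT) returns 32 (8 ints per
// vector); with only SSE2 available it returns 16 (4 ints per vector).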
|
|
|
|
// Limits on vector size (number of elements) loaded into vector.
|
|
const int Matcher::max_vector_size(const BasicType bt) {
|
|
return vector_width_in_bytes(bt)/type2aelembytes(bt);
|
|
}
|
|
const int Matcher::min_vector_size(const BasicType bt) {
|
|
int max_size = max_vector_size(bt);
|
|
// Min size which can be loaded into vector is 4 bytes.
|
|
int size = (type2aelembytes(bt) == 1) ? 4 : 2;
|
|
return MIN2(size,max_size);
|
|
}
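// Illustrative note (not part of the original sources): min_vector_size(T_BYTE)
// is 4 elements and min_vector_size(T_INT) is 2 elements, each capped by
// max_vector_size(bt) on platforms with smaller maximums.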
|
|
|
|
// Vector ideal reg corresponding to specified size in bytes
|
|
const int Matcher::vector_ideal_reg(int size) {
|
|
assert(MaxVectorSize >= size, "");
|
|
switch(size) {
|
|
case 4: return Op_VecS;
|
|
case 8: return Op_VecD;
|
|
case 16: return Op_VecX;
|
|
case 32: return Op_VecY;
|
|
}
|
|
ShouldNotReachHere();
|
|
return 0;
|
|
}
|
|
|
|
// Only lowest bits of xmm reg are used for vector shift count.
|
|
const int Matcher::vector_shift_count_ideal_reg(int size) {
|
|
return Op_VecS;
|
|
}
|
|
|
|
// x86 supports misaligned vector loads/stores.
|
|
const bool Matcher::misaligned_vectors_ok() {
|
|
return !AlignVector; // can be changed by flag
|
|
}
|
|
|
|
// Helper methods for MachSpillCopyNode::implementation().
|
|
static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
|
|
int src_hi, int dst_hi, uint ireg, outputStream* st) {
|
|
// In the 64-bit VM size calculation is very complex, so the size is obtained
// by emitting the instructions into a scratch buffer.
|
|
LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); )
|
|
assert(ireg == Op_VecS || // 32bit vector
|
|
(src_lo & 1) == 0 && (src_lo + 1) == src_hi &&
|
|
(dst_lo & 1) == 0 && (dst_lo + 1) == dst_hi,
|
|
"no non-adjacent vector moves" );
|
|
if (cbuf) {
|
|
MacroAssembler _masm(cbuf);
|
|
int offset = __ offset();
|
|
switch (ireg) {
|
|
case Op_VecS: // copy whole register
|
|
case Op_VecD:
|
|
case Op_VecX:
|
|
__ movdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]));
|
|
break;
|
|
case Op_VecY:
|
|
__ vmovdqu(as_XMMRegister(Matcher::_regEncode[dst_lo]), as_XMMRegister(Matcher::_regEncode[src_lo]));
|
|
break;
|
|
default:
|
|
ShouldNotReachHere();
|
|
}
|
|
int size = __ offset() - offset;
|
|
#ifdef ASSERT
|
|
// VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
|
|
assert(!do_size || size == 4, "incorrect size calculation");
|
|
#endif
|
|
return size;
|
|
#ifndef PRODUCT
|
|
} else if (!do_size) {
|
|
switch (ireg) {
|
|
case Op_VecS:
|
|
case Op_VecD:
|
|
case Op_VecX:
|
|
st->print("movdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
|
|
break;
|
|
case Op_VecY:
|
|
st->print("vmovdqu %s,%s\t# spill",Matcher::regName[dst_lo],Matcher::regName[src_lo]);
|
|
break;
|
|
default:
|
|
ShouldNotReachHere();
|
|
}
|
|
#endif
|
|
}
|
|
// VEX_2bytes prefix is used if UseAVX > 0, and it takes the same 2 bytes as SIMD prefix.
|
|
return 4;
|
|
}
|
|
|
|
static int vec_spill_helper(CodeBuffer *cbuf, bool do_size, bool is_load,
|
|
int stack_offset, int reg, uint ireg, outputStream* st) {
|
|
// In the 64-bit VM size calculation is very complex, so the size is obtained
// by emitting the instructions into a scratch buffer.
|
|
LP64_ONLY( assert(!do_size, "this method calculates size only for 32-bit VM"); )
|
|
if (cbuf) {
|
|
MacroAssembler _masm(cbuf);
|
|
int offset = __ offset();
|
|
if (is_load) {
|
|
switch (ireg) {
|
|
case Op_VecS:
|
|
__ movdl(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
|
|
break;
|
|
case Op_VecD:
|
|
__ movq(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
|
|
break;
|
|
case Op_VecX:
|
|
__ movdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
|
|
break;
|
|
case Op_VecY:
|
|
__ vmovdqu(as_XMMRegister(Matcher::_regEncode[reg]), Address(rsp, stack_offset));
|
|
break;
|
|
default:
|
|
ShouldNotReachHere();
|
|
}
|
|
} else { // store
|
|
switch (ireg) {
|
|
case Op_VecS:
|
|
__ movdl(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
|
|
break;
|
|
case Op_VecD:
|
|
__ movq(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
|
|
break;
|
|
case Op_VecX:
|
|
__ movdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
|
|
break;
|
|
case Op_VecY:
|
|
__ vmovdqu(Address(rsp, stack_offset), as_XMMRegister(Matcher::_regEncode[reg]));
|
|
break;
|
|
default:
|
|
ShouldNotReachHere();
|
|
}
|
|
}
|
|
int size = __ offset() - offset;
|
|
#ifdef ASSERT
|
|
int offset_size = (stack_offset == 0) ? 0 : ((stack_offset < 0x80) ? 1 : 4);
|
|
// VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
|
|
assert(!do_size || size == (5+offset_size), "incorrect size calculation");
|
|
#endif
|
|
return size;
|
|
#ifndef PRODUCT
|
|
} else if (!do_size) {
|
|
if (is_load) {
|
|
switch (ireg) {
|
|
case Op_VecS:
|
|
st->print("movd %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
|
|
break;
|
|
case Op_VecD:
|
|
st->print("movq %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
|
|
break;
|
|
case Op_VecX:
|
|
st->print("movdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
|
|
break;
|
|
case Op_VecY:
|
|
st->print("vmovdqu %s,[rsp + %d]\t# spill", Matcher::regName[reg], stack_offset);
|
|
break;
|
|
default:
|
|
ShouldNotReachHere();
|
|
}
|
|
} else { // store
|
|
switch (ireg) {
|
|
case Op_VecS:
|
|
st->print("movd [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
|
|
break;
|
|
case Op_VecD:
|
|
st->print("movq [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
|
|
break;
|
|
case Op_VecX:
|
|
st->print("movdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
|
|
break;
|
|
case Op_VecY:
|
|
st->print("vmovdqu [rsp + %d],%s\t# spill", stack_offset, Matcher::regName[reg]);
|
|
break;
|
|
default:
|
|
ShouldNotReachHere();
|
|
}
|
|
}
|
|
#endif
|
|
}
|
|
int offset_size = (stack_offset == 0) ? 0 : ((stack_offset < 0x80) ? 1 : 4);
|
|
// VEX_2bytes prefix is used if UseAVX > 0, so it takes the same 2 bytes as SIMD prefix.
|
|
return 5+offset_size;
|
|
}
|
|
|
|
static inline jfloat replicate4_imm(int con, int width) {
|
|
// Load a constant of "width" (in bytes) and replicate it to fill 32bit.
|
|
assert(width == 1 || width == 2, "only byte or short types here");
|
|
int bit_width = width * 8;
|
|
jint val = con;
|
|
val &= (1 << bit_width) - 1; // mask off sign bits
|
|
while(bit_width < 32) {
|
|
val |= (val << bit_width);
|
|
bit_width <<= 1;
|
|
}
|
|
jfloat fval = *((jfloat*) &val); // coerce to float type
|
|
return fval;
|
|
}
|
|
|
|
static inline jdouble replicate8_imm(int con, int width) {
|
|
// Load a constant of "width" (in bytes) and replicate it to fill 64bit.
|
|
assert(width == 1 || width == 2 || width == 4, "only byte, short or int types here");
|
|
int bit_width = width * 8;
|
|
jlong val = con;
|
|
val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits
|
|
while(bit_width < 64) {
|
|
val |= (val << bit_width);
|
|
bit_width <<= 1;
|
|
}
|
|
jdouble dval = *((jdouble*) &val); // coerce to double type
|
|
return dval;
|
|
}
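// Illustrative note (not part of the original sources): replicate4_imm(0x5, 1)
// yields the bit pattern 0x05050505 reinterpreted as a jfloat, and
// replicate8_imm(0x5, 2) yields 0x0005000500050005 reinterpreted as a jdouble.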
|
|
|
|
#ifndef PRODUCT
|
|
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
|
|
st->print("nop \t# %d bytes pad for loops and calls", _count);
|
|
}
|
|
#endif
|
|
|
|
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const {
|
|
MacroAssembler _masm(&cbuf);
|
|
__ nop(_count);
|
|
}
|
|
|
|
uint MachNopNode::size(PhaseRegAlloc*) const {
|
|
return _count;
|
|
}
|
|
|
|
#ifndef PRODUCT
|
|
void MachBreakpointNode::format(PhaseRegAlloc*, outputStream* st) const {
|
|
st->print("# breakpoint");
|
|
}
|
|
#endif
|
|
|
|
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
|
|
MacroAssembler _masm(&cbuf);
|
|
__ int3();
|
|
}
|
|
|
|
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
|
|
return MachNode::size(ra_);
|
|
}
|
|
|
|
%}
|
|
|
|
encode %{
|
|
|
|
enc_class preserve_SP %{
|
|
debug_only(int off0 = cbuf.insts_size());
|
|
MacroAssembler _masm(&cbuf);
|
|
// RBP is preserved across all calls, even compiled calls.
|
|
// Use it to preserve RSP in places where the callee might change the SP.
|
|
__ movptr(rbp_mh_SP_save, rsp);
|
|
debug_only(int off1 = cbuf.insts_size());
|
|
assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
|
|
%}
|
|
|
|
enc_class restore_SP %{
|
|
MacroAssembler _masm(&cbuf);
|
|
__ movptr(rsp, rbp_mh_SP_save);
|
|
%}
|
|
|
|
enc_class call_epilog %{
|
|
if (VerifyStackAtCalls) {
|
|
// Check that stack depth is unchanged: find majik cookie on stack
|
|
int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP, -3*VMRegImpl::slots_per_word));
|
|
MacroAssembler _masm(&cbuf);
|
|
Label L;
|
|
__ cmpptr(Address(rsp, framesize), (int32_t)0xbadb100d);
|
|
__ jccb(Assembler::equal, L);
|
|
// Die if stack mismatch
|
|
__ int3();
|
|
__ bind(L);
|
|
}
|
|
%}
|
|
|
|
%}
|
|
|
|
|
|
//----------OPERANDS-----------------------------------------------------------
|
|
// Operand definitions must precede instruction definitions for correct parsing
|
|
// in the ADLC because operands constitute user defined types which are used in
|
|
// instruction definitions.
|
|
|
|
// Vectors
|
|
operand vecS() %{
|
|
constraint(ALLOC_IN_RC(vectors_reg));
|
|
match(VecS);
|
|
|
|
format %{ %}
|
|
interface(REG_INTER);
|
|
%}
|
|
|
|
operand vecD() %{
|
|
constraint(ALLOC_IN_RC(vectord_reg));
|
|
match(VecD);
|
|
|
|
format %{ %}
|
|
interface(REG_INTER);
|
|
%}
|
|
|
|
operand vecX() %{
|
|
constraint(ALLOC_IN_RC(vectorx_reg));
|
|
match(VecX);
|
|
|
|
format %{ %}
|
|
interface(REG_INTER);
|
|
%}
|
|
|
|
operand vecY() %{
|
|
constraint(ALLOC_IN_RC(vectory_reg));
|
|
match(VecY);
|
|
|
|
format %{ %}
|
|
interface(REG_INTER);
|
|
%}
|
|
|
|
|
|
// INSTRUCTIONS -- Platform independent definitions (same for 32- and 64-bit)
|
|
|
|
// ============================================================================
|
|
|
|
instruct ShouldNotReachHere() %{
|
|
match(Halt);
|
|
format %{ "int3\t# ShouldNotReachHere" %}
|
|
ins_encode %{
|
|
__ int3();
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
// ============================================================================
|
|
|
|
instruct addF_reg(regF dst, regF src) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (AddF dst src));
|
|
|
|
format %{ "addss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ addss($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addF_mem(regF dst, memory src) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (AddF dst (LoadF src)));
|
|
|
|
format %{ "addss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ addss($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addF_imm(regF dst, immF con) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (AddF dst con));
|
|
format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ addss($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (AddF src1 src2));
|
|
|
|
format %{ "vaddss $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addF_reg_mem(regF dst, regF src1, memory src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (AddF src1 (LoadF src2)));
|
|
|
|
format %{ "vaddss $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vaddss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addF_reg_imm(regF dst, regF src, immF con) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (AddF src con));
|
|
|
|
format %{ "vaddss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vaddss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addD_reg(regD dst, regD src) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (AddD dst src));
|
|
|
|
format %{ "addsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ addsd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addD_mem(regD dst, memory src) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (AddD dst (LoadD src)));
|
|
|
|
format %{ "addsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ addsd($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addD_imm(regD dst, immD con) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (AddD dst con));
|
|
format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ addsd($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (AddD src1 src2));
|
|
|
|
format %{ "vaddsd $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addD_reg_mem(regD dst, regD src1, memory src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (AddD src1 (LoadD src2)));
|
|
|
|
format %{ "vaddsd $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vaddsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct addD_reg_imm(regD dst, regD src, immD con) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (AddD src con));
|
|
|
|
format %{ "vaddsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vaddsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subF_reg(regF dst, regF src) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (SubF dst src));
|
|
|
|
format %{ "subss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ subss($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subF_mem(regF dst, memory src) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (SubF dst (LoadF src)));
|
|
|
|
format %{ "subss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ subss($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subF_imm(regF dst, immF con) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (SubF dst con));
|
|
format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ subss($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (SubF src1 src2));
|
|
|
|
format %{ "vsubss $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subF_reg_mem(regF dst, regF src1, memory src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (SubF src1 (LoadF src2)));
|
|
|
|
format %{ "vsubss $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vsubss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subF_reg_imm(regF dst, regF src, immF con) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (SubF src con));
|
|
|
|
format %{ "vsubss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vsubss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subD_reg(regD dst, regD src) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (SubD dst src));
|
|
|
|
format %{ "subsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ subsd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subD_mem(regD dst, memory src) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (SubD dst (LoadD src)));
|
|
|
|
format %{ "subsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ subsd($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subD_imm(regD dst, immD con) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (SubD dst con));
|
|
format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ subsd($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (SubD src1 src2));
|
|
|
|
format %{ "vsubsd $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subD_reg_mem(regD dst, regD src1, memory src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (SubD src1 (LoadD src2)));
|
|
|
|
format %{ "vsubsd $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vsubsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct subD_reg_imm(regD dst, regD src, immD con) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (SubD src con));
|
|
|
|
format %{ "vsubsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vsubsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulF_reg(regF dst, regF src) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (MulF dst src));
|
|
|
|
format %{ "mulss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ mulss($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulF_mem(regF dst, memory src) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (MulF dst (LoadF src)));
|
|
|
|
format %{ "mulss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ mulss($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulF_imm(regF dst, immF con) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (MulF dst con));
|
|
format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ mulss($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (MulF src1 src2));
|
|
|
|
format %{ "vmulss $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulF_reg_mem(regF dst, regF src1, memory src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (MulF src1 (LoadF src2)));
|
|
|
|
format %{ "vmulss $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vmulss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulF_reg_imm(regF dst, regF src, immF con) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (MulF src con));
|
|
|
|
format %{ "vmulss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vmulss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulD_reg(regD dst, regD src) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (MulD dst src));
|
|
|
|
format %{ "mulsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ mulsd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulD_mem(regD dst, memory src) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (MulD dst (LoadD src)));
|
|
|
|
format %{ "mulsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ mulsd($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulD_imm(regD dst, immD con) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (MulD dst con));
|
|
format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ mulsd($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (MulD src1 src2));
|
|
|
|
format %{ "vmulsd $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulD_reg_mem(regD dst, regD src1, memory src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (MulD src1 (LoadD src2)));
|
|
|
|
format %{ "vmulsd $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vmulsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct mulD_reg_imm(regD dst, regD src, immD con) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (MulD src con));
|
|
|
|
format %{ "vmulsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vmulsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divF_reg(regF dst, regF src) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (DivF dst src));
|
|
|
|
format %{ "divss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ divss($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divF_mem(regF dst, memory src) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (DivF dst (LoadF src)));
|
|
|
|
format %{ "divss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ divss($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divF_imm(regF dst, immF con) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (DivF dst con));
|
|
format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ divss($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (DivF src1 src2));
|
|
|
|
format %{ "vdivss $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divF_reg_mem(regF dst, regF src1, memory src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (DivF src1 (LoadF src2)));
|
|
|
|
format %{ "vdivss $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vdivss($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divF_reg_imm(regF dst, regF src, immF con) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (DivF src con));
|
|
|
|
format %{ "vdivss $dst, $src, [$constantaddress]\t# load from constant table: float=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vdivss($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divD_reg(regD dst, regD src) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (DivD dst src));
|
|
|
|
format %{ "divsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ divsd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divD_mem(regD dst, memory src) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (DivD dst (LoadD src)));
|
|
|
|
format %{ "divsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ divsd($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divD_imm(regD dst, immD con) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (DivD dst con));
|
|
format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ divsd($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (DivD src1 src2));
|
|
|
|
format %{ "vdivsd $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divD_reg_mem(regD dst, regD src1, memory src2) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (DivD src1 (LoadD src2)));
|
|
|
|
format %{ "vdivsd $dst, $src1, $src2" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vdivsd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct divD_reg_imm(regD dst, regD src, immD con) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (DivD src con));
|
|
|
|
format %{ "vdivsd $dst, $src, [$constantaddress]\t# load from constant table: double=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ vdivsd($dst$$XMMRegister, $src$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct absF_reg(regF dst) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (AbsF dst));
|
|
ins_cost(150);
|
|
format %{ "andps $dst, [0x7fffffff]\t# abs float by sign masking" %}
|
|
ins_encode %{
|
|
__ andps($dst$$XMMRegister, ExternalAddress(float_signmask()));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct absF_reg_reg(regF dst, regF src) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (AbsF src));
|
|
ins_cost(150);
|
|
format %{ "vandps $dst, $src, [0x7fffffff]\t# abs float by sign masking" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vandps($dst$$XMMRegister, $src$$XMMRegister,
|
|
ExternalAddress(float_signmask()), vector256);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct absD_reg(regD dst) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (AbsD dst));
|
|
ins_cost(150);
|
|
format %{ "andpd $dst, [0x7fffffffffffffff]\t"
|
|
"# abs double by sign masking" %}
|
|
ins_encode %{
|
|
__ andpd($dst$$XMMRegister, ExternalAddress(double_signmask()));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct absD_reg_reg(regD dst, regD src) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (AbsD src));
|
|
ins_cost(150);
|
|
format %{ "vandpd $dst, $src, [0x7fffffffffffffff]\t"
|
|
"# abs double by sign masking" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vandpd($dst$$XMMRegister, $src$$XMMRegister,
|
|
ExternalAddress(double_signmask()), vector256);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct negF_reg(regF dst) %{
|
|
predicate((UseSSE>=1) && (UseAVX == 0));
|
|
match(Set dst (NegF dst));
|
|
ins_cost(150);
|
|
format %{ "xorps $dst, [0x80000000]\t# neg float by sign flipping" %}
|
|
ins_encode %{
|
|
__ xorps($dst$$XMMRegister, ExternalAddress(float_signflip()));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct negF_reg_reg(regF dst, regF src) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (NegF src));
|
|
ins_cost(150);
|
|
format %{ "vxorps $dst, $src, [0x80000000]\t# neg float by sign flipping" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vxorps($dst$$XMMRegister, $src$$XMMRegister,
|
|
ExternalAddress(float_signflip()), vector256);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct negD_reg(regD dst) %{
|
|
predicate((UseSSE>=2) && (UseAVX == 0));
|
|
match(Set dst (NegD dst));
|
|
ins_cost(150);
|
|
format %{ "xorpd $dst, [0x8000000000000000]\t"
|
|
"# neg double by sign flipping" %}
|
|
ins_encode %{
|
|
__ xorpd($dst$$XMMRegister, ExternalAddress(double_signflip()));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct negD_reg_reg(regD dst, regD src) %{
|
|
predicate(UseAVX > 0);
|
|
match(Set dst (NegD src));
|
|
ins_cost(150);
|
|
format %{ "vxorpd $dst, $src, [0x8000000000000000]\t"
|
|
"# neg double by sign flipping" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vxorpd($dst$$XMMRegister, $src$$XMMRegister,
|
|
ExternalAddress(double_signflip()), vector256);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct sqrtF_reg(regF dst, regF src) %{
|
|
predicate(UseSSE>=1);
|
|
match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
|
|
|
|
format %{ "sqrtss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ sqrtss($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct sqrtF_mem(regF dst, memory src) %{
|
|
predicate(UseSSE>=1);
|
|
match(Set dst (ConvD2F (SqrtD (ConvF2D (LoadF src)))));
|
|
|
|
format %{ "sqrtss $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ sqrtss($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct sqrtF_imm(regF dst, immF con) %{
|
|
predicate(UseSSE>=1);
|
|
match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
|
|
format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ sqrtss($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct sqrtD_reg(regD dst, regD src) %{
|
|
predicate(UseSSE>=2);
|
|
match(Set dst (SqrtD src));
|
|
|
|
format %{ "sqrtsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ sqrtsd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct sqrtD_mem(regD dst, memory src) %{
|
|
predicate(UseSSE>=2);
|
|
match(Set dst (SqrtD (LoadD src)));
|
|
|
|
format %{ "sqrtsd $dst, $src" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ sqrtsd($dst$$XMMRegister, $src$$Address);
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
instruct sqrtD_imm(regD dst, immD con) %{
|
|
predicate(UseSSE>=2);
|
|
match(Set dst (SqrtD con));
|
|
format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
|
|
ins_cost(150);
|
|
ins_encode %{
|
|
__ sqrtsd($dst$$XMMRegister, $constantaddress($con));
|
|
%}
|
|
ins_pipe(pipe_slow);
|
|
%}
|
|
|
|
|
|
// ====================VECTOR INSTRUCTIONS=====================================
|
|
|
|
// Load vectors (4 bytes long)
instruct loadV4(vecS dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 4);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "movd $dst,$mem\t! load vector (4 bytes)" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}

// Load vectors (8 bytes long)
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "movq $dst,$mem\t! load vector (8 bytes)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}

// Load vectors (16 bytes long)
instruct loadV16(vecX dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 16);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "movdqu $dst,$mem\t! load vector (16 bytes)" %}
  ins_encode %{
    __ movdqu($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}

// Load vectors (32 bytes long)
instruct loadV32(vecY dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 32);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "vmovdqu $dst,$mem\t! load vector (32 bytes)" %}
  ins_encode %{
    __ vmovdqu($dst$$XMMRegister, $mem$$Address);
  %}
  ins_pipe( pipe_slow );
%}

// Store vectors
instruct storeV4(memory mem, vecS src) %{
  predicate(n->as_StoreVector()->memory_size() == 4);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "movd $mem,$src\t! store vector (4 bytes)" %}
  ins_encode %{
    __ movdl($mem$$Address, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "movq $mem,$src\t! store vector (8 bytes)" %}
  ins_encode %{
    __ movq($mem$$Address, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct storeV16(memory mem, vecX src) %{
  predicate(n->as_StoreVector()->memory_size() == 16);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "movdqu $mem,$src\t! store vector (16 bytes)" %}
  ins_encode %{
    __ movdqu($mem$$Address, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct storeV32(memory mem, vecY src) %{
  predicate(n->as_StoreVector()->memory_size() == 32);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "vmovdqu $mem,$src\t! store vector (32 bytes)" %}
  ins_encode %{
    __ vmovdqu($mem$$Address, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

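// Note: the 16- and 32-byte forms above use the unaligned moves
// (movdqu/vmovdqu), so vector loads and stores do not require their memory
// operands to be 16- or 32-byte aligned.
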
// Replicate byte scalar to be vector
instruct Repl4B(vecS dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateB src));
  format %{ "movd $dst,$src\n\t"
            "punpcklbw $dst,$dst\n\t"
            "pshuflw $dst,$dst,0x00\t! replicate4B" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8B(vecD dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  format %{ "movd $dst,$src\n\t"
            "punpcklbw $dst,$dst\n\t"
            "pshuflw $dst,$dst,0x00\t! replicate8B" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl16B(vecX dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB src));
  format %{ "movd $dst,$src\n\t"
            "punpcklbw $dst,$dst\n\t"
            "pshuflw $dst,$dst,0x00\n\t"
            "punpcklqdq $dst,$dst\t! replicate16B" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl32B(vecY dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 32);
  match(Set dst (ReplicateB src));
  format %{ "movd $dst,$src\n\t"
            "punpcklbw $dst,$dst\n\t"
            "pshuflw $dst,$dst,0x00\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate32B" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ punpcklbw($dst$$XMMRegister, $dst$$XMMRegister);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate byte scalar immediate to be vector by loading from const table.
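// (replicate4_imm/replicate8_imm are assumed here to build a 4- or 8-byte
// constant by repeating the low bytes of the immediate at the given element
// size, e.g. a byte immediate 0x41 would become 0x4141414141414141; the
// constant is then broadcast further with punpcklqdq/vinserti128h as needed.)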
instruct Repl4B_imm(vecS dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateB con));
  format %{ "movdl $dst,[$constantaddress]\t! replicate4B($con)" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 1)));
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  format %{ "movq $dst,[$constantaddress]\t! replicate8B($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl16B_imm(vecX dst, immI con) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\t! replicate16B($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl32B_imm(vecY dst, immI con) %{
  predicate(n->as_Vector()->length() == 32);
  match(Set dst (ReplicateB con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate32B($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 1)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate byte scalar zero to be vector
instruct Repl4B_zero(vecS dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateB zero));
  format %{ "pxor $dst,$dst\t! replicate4B zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  format %{ "pxor $dst,$dst\t! replicate8B zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl16B_zero(vecX dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateB zero));
  format %{ "pxor $dst,$dst\t! replicate16B zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl32B_zero(vecY dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 32);
  match(Set dst (ReplicateB zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate32B zero" %}
  ins_encode %{
    // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}

// Replicate char/short (2 byte) scalar to be vector
instruct Repl2S(vecS dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateS src));
  format %{ "movd $dst,$src\n\t"
            "pshuflw $dst,$dst,0x00\t! replicate2S" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4S(vecD dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  format %{ "movd $dst,$src\n\t"
            "pshuflw $dst,$dst,0x00\t! replicate4S" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8S(vecX dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS src));
  format %{ "movd $dst,$src\n\t"
            "pshuflw $dst,$dst,0x00\n\t"
            "punpcklqdq $dst,$dst\t! replicate8S" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl16S(vecY dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateS src));
  format %{ "movd $dst,$src\n\t"
            "pshuflw $dst,$dst,0x00\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate16S" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshuflw($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate char/short (2 byte) scalar immediate to be vector by loading from const table.
instruct Repl2S_imm(vecS dst, immI con) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateS con));
  format %{ "movdl $dst,[$constantaddress]\t! replicate2S($con)" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $constantaddress(replicate4_imm($con$$constant, 2)));
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  format %{ "movq $dst,[$constantaddress]\t! replicate4S($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8S_imm(vecX dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\t! replicate8S($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl16S_imm(vecY dst, immI con) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateS con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate16S($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 2)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate char/short (2 byte) scalar zero to be vector
instruct Repl2S_zero(vecS dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateS zero));
  format %{ "pxor $dst,$dst\t! replicate2S zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "pxor $dst,$dst\t! replicate4S zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8S_zero(vecX dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateS zero));
  format %{ "pxor $dst,$dst\t! replicate8S zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl16S_zero(vecY dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (ReplicateS zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate16S zero" %}
  ins_encode %{
    // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}

// Replicate integer (4 byte) scalar to be vector
instruct Repl2I(vecD dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "movd $dst,$src\n\t"
            "pshufd $dst,$dst,0x00\t! replicate2I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I(vecX dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI src));
  format %{ "movd $dst,$src\n\t"
            "pshufd $dst,$dst,0x00\t! replicate4I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8I(vecY dst, rRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI src));
  format %{ "movd $dst,$src\n\t"
            "pshufd $dst,$dst,0x00\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
instruct Repl2I_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  format %{ "movq $dst,[$constantaddress]\t! replicate2I($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I_imm(vecX dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI con));
  format %{ "movq $dst,[$constantaddress]\t! replicate4I($con)\n\t"
            "punpcklqdq $dst,$dst" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8I_imm(vecY dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI con));
  format %{ "movq $dst,[$constantaddress]\t! replicate8I($con)\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress(replicate8_imm($con$$constant, 4)));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Integer could be loaded into xmm register directly from memory.
instruct Repl2I_mem(vecD dst, memory mem) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI (LoadI mem)));
  format %{ "movd $dst,$mem\n\t"
            "pshufd $dst,$dst,0x00\t! replicate2I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I_mem(vecX dst, memory mem) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI (LoadI mem)));
  format %{ "movd $dst,$mem\n\t"
            "pshufd $dst,$dst,0x00\t! replicate4I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8I_mem(vecY dst, memory mem) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI (LoadI mem)));
  format %{ "movd $dst,$mem\n\t"
            "pshufd $dst,$dst,0x00\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate8I" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $mem$$Address);
    __ pshufd($dst$$XMMRegister, $dst$$XMMRegister, 0x00);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate integer (4 byte) scalar zero to be vector
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "pxor $dst,$dst\t! replicate2I" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4I_zero(vecX dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateI zero));
  format %{ "pxor $dst,$dst\t! replicate4I zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8I_zero(vecY dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateI zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate8I zero" %}
  ins_encode %{
    // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}

// Replicate long (8 byte) scalar to be vector
#ifdef _LP64
instruct Repl2L(vecX dst, rRegL src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  format %{ "movdq $dst,$src\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L" %}
  ins_encode %{
    __ movdq($dst$$XMMRegister, $src$$Register);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L(vecY dst, rRegL src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL src));
  format %{ "movdq $dst,$src\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
  ins_encode %{
    __ movdq($dst$$XMMRegister, $src$$Register);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
#else // _LP64
instruct Repl2L(vecX dst, eRegL src, regD tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL src));
  effect(TEMP dst, USE src, TEMP tmp);
  format %{ "movdl $dst,$src.lo\n\t"
            "movdl $tmp,$src.hi\n\t"
            "punpckldq $dst,$tmp\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L(vecY dst, eRegL src, regD tmp) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL src));
  effect(TEMP dst, USE src, TEMP tmp);
  format %{ "movdl $dst,$src.lo\n\t"
            "movdl $tmp,$src.hi\n\t"
            "punpckldq $dst,$tmp\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $src$$Register);
    __ movdl($tmp$$XMMRegister, HIGH_FROM_LOW($src$$Register));
    __ punpckldq($dst$$XMMRegister, $tmp$$XMMRegister);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
#endif // _LP64

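// Note on the 32-bit (!_LP64) Repl2L/Repl4L variants above: without 64-bit
// general registers the long arrives as a lo/hi register pair, so each half
// is moved into an XMM register separately and merged with punpckldq before
// the usual punpcklqdq/vinserti128h broadcast.
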
// Replicate long (8 byte) scalar immediate to be vector by loading from const table.
instruct Repl2L_imm(vecX dst, immL con) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress($con));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L_imm(vecY dst, immL con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL con));
  format %{ "movq $dst,[$constantaddress]\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L($con)" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $constantaddress($con));
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Long could be loaded into xmm register directly from memory.
instruct Repl2L_mem(vecX dst, memory mem) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL (LoadL mem)));
  format %{ "movq $dst,$mem\n\t"
            "punpcklqdq $dst,$dst\t! replicate2L" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $mem$$Address);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4L_mem(vecY dst, memory mem) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL (LoadL mem)));
  format %{ "movq $dst,$mem\n\t"
            "punpcklqdq $dst,$dst\n\t"
            "vinserti128h $dst,$dst,$dst\t! replicate4L" %}
  ins_encode %{
    __ movq($dst$$XMMRegister, $mem$$Address);
    __ punpcklqdq($dst$$XMMRegister, $dst$$XMMRegister);
    __ vinserti128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate long (8 byte) scalar zero to be vector
instruct Repl2L_zero(vecX dst, immL0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateL zero));
  format %{ "pxor $dst,$dst\t! replicate2L zero" %}
  ins_encode %{
    __ pxor($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4L_zero(vecY dst, immL0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateL zero));
  format %{ "vpxor $dst,$dst,$dst\t! replicate4L zero" %}
  ins_encode %{
    // Use vxorpd since AVX does not have vpxor for 256-bit (AVX2 will have it).
    bool vector256 = true;
    __ vpxor($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}

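// Note: the float/double replicates below start from an XMM register, so the
// broadcast is done purely with shuffles (pshufd 0x00 for floats, 0x44 for
// doubles) instead of first moving the scalar out of a general register.
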
// Replicate float (4 byte) scalar to be vector
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "pshufd $dst,$dst,0x00\t! replicate2F" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4F(vecX dst, regF src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF src));
  format %{ "pshufd $dst,$dst,0x00\t! replicate4F" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl8F(vecY dst, regF src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateF src));
  format %{ "pshufd $dst,$src,0x00\n\t"
            "vinsertf128h $dst,$dst,$dst\t! replicate8F" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x00);
    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate float (4 byte) scalar zero to be vector
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "xorps $dst,$dst\t! replicate2F zero" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4F_zero(vecX dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateF zero));
  format %{ "xorps $dst,$dst\t! replicate4F zero" %}
  ins_encode %{
    __ xorps($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl8F_zero(vecY dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateF zero));
  format %{ "vxorps $dst,$dst,$dst\t! replicate8F zero" %}
  ins_encode %{
    bool vector256 = true;
    __ vxorps($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}

// Replicate double (8 bytes) scalar to be vector
instruct Repl2D(vecX dst, regD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD src));
  format %{ "pshufd $dst,$src,0x44\t! replicate2D" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
  %}
  ins_pipe( pipe_slow );
%}

instruct Repl4D(vecY dst, regD src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateD src));
  format %{ "pshufd $dst,$src,0x44\n\t"
            "vinsertf128h $dst,$dst,$dst\t! replicate4D" %}
  ins_encode %{
    __ pshufd($dst$$XMMRegister, $src$$XMMRegister, 0x44);
    __ vinsertf128h($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate double (8 byte) scalar zero to be vector
instruct Repl2D_zero(vecX dst, immD0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateD zero));
  format %{ "xorpd $dst,$dst\t! replicate2D zero" %}
  ins_encode %{
    __ xorpd($dst$$XMMRegister, $dst$$XMMRegister);
  %}
  ins_pipe( fpu_reg_reg );
%}

instruct Repl4D_zero(vecY dst, immD0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateD zero));
  format %{ "vxorpd $dst,$dst,$dst,vect256\t! replicate4D zero" %}
  ins_encode %{
    bool vector256 = true;
    __ vxorpd($dst$$XMMRegister, $dst$$XMMRegister, $dst$$XMMRegister, vector256);
  %}
  ins_pipe( fpu_reg_reg );
%}

// ====================VECTOR ARITHMETIC=======================================

// --------------------------------- ADD --------------------------------------

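// The arithmetic rules below follow a common pattern: the plain (SSE) form is
// two-operand and destructive (dst = dst op src), the *_reg (AVX) form is
// three-operand and keyed on UseAVX, and the *_mem form folds a LoadVector
// directly into the AVX instruction. The 256-bit integer forms additionally
// require AVX2 (UseAVX > 1).
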
// Bytes vector add
instruct vadd4B(vecS dst, vecS src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed4B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd4B_reg(vecS dst, vecS src1, vecS src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed4B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd8B(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed8B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd8B_reg(vecD dst, vecD src1, vecD src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed8B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd16B(vecX dst, vecX src) %{
  predicate(n->as_Vector()->length() == 16);
  match(Set dst (AddVB dst src));
  format %{ "paddb $dst,$src\t! add packed16B" %}
  ins_encode %{
    __ paddb($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed16B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd16B_mem(vecX dst, vecX src, memory mem) %{
  predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
  match(Set dst (AddVB src (LoadVector mem)));
  format %{ "vpaddb $dst,$src,$mem\t! add packed16B" %}
  ins_encode %{
    bool vector256 = false;
    __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd32B_reg(vecY dst, vecY src1, vecY src2) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (AddVB src1 src2));
  format %{ "vpaddb $dst,$src1,$src2\t! add packed32B" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
  %}
  ins_pipe( pipe_slow );
%}

instruct vadd32B_mem(vecY dst, vecY src, memory mem) %{
  predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
  match(Set dst (AddVB src (LoadVector mem)));
  format %{ "vpaddb $dst,$src,$mem\t! add packed32B" %}
  ins_encode %{
    bool vector256 = true;
    __ vpaddb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
  %}
  ins_pipe( pipe_slow );
%}

// Shorts/Chars vector add
|
|
instruct vadd2S(vecS dst, vecS src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVS dst src));
|
|
format %{ "paddw $dst,$src\t! add packed2S" %}
|
|
ins_encode %{
|
|
__ paddw($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd2S_reg(vecS dst, vecS src1, vecS src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVS src1 src2));
|
|
format %{ "vpaddw $dst,$src1,$src2\t! add packed2S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4S(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVS dst src));
|
|
format %{ "paddw $dst,$src\t! add packed4S" %}
|
|
ins_encode %{
|
|
__ paddw($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4S_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVS src1 src2));
|
|
format %{ "vpaddw $dst,$src1,$src2\t! add packed4S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd8S(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (AddVS dst src));
|
|
format %{ "paddw $dst,$src\t! add packed8S" %}
|
|
ins_encode %{
|
|
__ paddw($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (AddVS src1 src2));
|
|
format %{ "vpaddw $dst,$src1,$src2\t! add packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd8S_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (AddVS src (LoadVector mem)));
|
|
format %{ "vpaddw $dst,$src,$mem\t! add packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd16S_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (AddVS src1 src2));
|
|
format %{ "vpaddw $dst,$src1,$src2\t! add packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpaddw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd16S_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (AddVS src (LoadVector mem)));
|
|
format %{ "vpaddw $dst,$src,$mem\t! add packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpaddw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// Integers vector add
|
|
instruct vadd2I(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVI dst src));
|
|
format %{ "paddd $dst,$src\t! add packed2I" %}
|
|
ins_encode %{
|
|
__ paddd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd2I_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVI src1 src2));
|
|
format %{ "vpaddd $dst,$src1,$src2\t! add packed2I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4I(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVI dst src));
|
|
format %{ "paddd $dst,$src\t! add packed4I" %}
|
|
ins_encode %{
|
|
__ paddd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVI src1 src2));
|
|
format %{ "vpaddd $dst,$src1,$src2\t! add packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4I_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVI src (LoadVector mem)));
|
|
format %{ "vpaddd $dst,$src,$mem\t! add packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd8I_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (AddVI src1 src2));
|
|
format %{ "vpaddd $dst,$src1,$src2\t! add packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpaddd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd8I_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (AddVI src (LoadVector mem)));
|
|
format %{ "vpaddd $dst,$src,$mem\t! add packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpaddd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// Longs vector add
|
|
instruct vadd2L(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVL dst src));
|
|
format %{ "paddq $dst,$src\t! add packed2L" %}
|
|
ins_encode %{
|
|
__ paddq($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVL src1 src2));
|
|
format %{ "vpaddq $dst,$src1,$src2\t! add packed2L" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd2L_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVL src (LoadVector mem)));
|
|
format %{ "vpaddq $dst,$src,$mem\t! add packed2L" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4L_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVL src1 src2));
|
|
format %{ "vpaddq $dst,$src1,$src2\t! add packed4L" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpaddq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4L_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVL src (LoadVector mem)));
|
|
format %{ "vpaddq $dst,$src,$mem\t! add packed4L" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpaddq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// Floats vector add
|
|
instruct vadd2F(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVF dst src));
|
|
format %{ "addps $dst,$src\t! add packed2F" %}
|
|
ins_encode %{
|
|
__ addps($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd2F_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVF src1 src2));
|
|
format %{ "vaddps $dst,$src1,$src2\t! add packed2F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4F(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVF dst src));
|
|
format %{ "addps $dst,$src\t! add packed4F" %}
|
|
ins_encode %{
|
|
__ addps($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4F_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVF src1 src2));
|
|
format %{ "vaddps $dst,$src1,$src2\t! add packed4F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4F_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVF src (LoadVector mem)));
|
|
format %{ "vaddps $dst,$src,$mem\t! add packed4F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd8F_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (AddVF src1 src2));
|
|
format %{ "vaddps $dst,$src1,$src2\t! add packed8F" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vaddps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd8F_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (AddVF src (LoadVector mem)));
|
|
format %{ "vaddps $dst,$src,$mem\t! add packed8F" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vaddps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// Doubles vector add
|
|
instruct vadd2D(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVD dst src));
|
|
format %{ "addpd $dst,$src\t! add packed2D" %}
|
|
ins_encode %{
|
|
__ addpd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd2D_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVD src1 src2));
|
|
format %{ "vaddpd $dst,$src1,$src2\t! add packed2D" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd2D_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (AddVD src (LoadVector mem)));
|
|
format %{ "vaddpd $dst,$src,$mem\t! add packed2D" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4D_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVD src1 src2));
|
|
format %{ "vaddpd $dst,$src1,$src2\t! add packed4D" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vaddpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vadd4D_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (AddVD src (LoadVector mem)));
|
|
format %{ "vaddpd $dst,$src,$mem\t! add packed4D" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vaddpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// --------------------------------- SUB --------------------------------------
|
|
|
|
// Bytes vector sub
|
|
instruct vsub4B(vecS dst, vecS src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVB dst src));
|
|
format %{ "psubb $dst,$src\t! sub packed4B" %}
|
|
ins_encode %{
|
|
__ psubb($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4B_reg(vecS dst, vecS src1, vecS src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVB src1 src2));
|
|
format %{ "vpsubb $dst,$src1,$src2\t! sub packed4B" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub8B(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (SubVB dst src));
|
|
format %{ "psubb $dst,$src\t! sub packed8B" %}
|
|
ins_encode %{
|
|
__ psubb($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub8B_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (SubVB src1 src2));
|
|
format %{ "vpsubb $dst,$src1,$src2\t! sub packed8B" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub16B(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 16);
|
|
match(Set dst (SubVB dst src));
|
|
format %{ "psubb $dst,$src\t! sub packed16B" %}
|
|
ins_encode %{
|
|
__ psubb($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
|
|
match(Set dst (SubVB src1 src2));
|
|
format %{ "vpsubb $dst,$src1,$src2\t! sub packed16B" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub16B_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 16);
|
|
match(Set dst (SubVB src (LoadVector mem)));
|
|
format %{ "vpsubb $dst,$src,$mem\t! sub packed16B" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub32B_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
|
|
match(Set dst (SubVB src1 src2));
|
|
format %{ "vpsubb $dst,$src1,$src2\t! sub packed32B" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsubb($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub32B_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 32);
|
|
match(Set dst (SubVB src (LoadVector mem)));
|
|
format %{ "vpsubb $dst,$src,$mem\t! sub packed32B" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsubb($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// Shorts/Chars vector sub
|
|
instruct vsub2S(vecS dst, vecS src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVS dst src));
|
|
format %{ "psubw $dst,$src\t! sub packed2S" %}
|
|
ins_encode %{
|
|
__ psubw($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub2S_reg(vecS dst, vecS src1, vecS src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVS src1 src2));
|
|
format %{ "vpsubw $dst,$src1,$src2\t! sub packed2S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4S(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVS dst src));
|
|
format %{ "psubw $dst,$src\t! sub packed4S" %}
|
|
ins_encode %{
|
|
__ psubw($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4S_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVS src1 src2));
|
|
format %{ "vpsubw $dst,$src1,$src2\t! sub packed4S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub8S(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (SubVS dst src));
|
|
format %{ "psubw $dst,$src\t! sub packed8S" %}
|
|
ins_encode %{
|
|
__ psubw($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub8S_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (SubVS src1 src2));
|
|
format %{ "vpsubw $dst,$src1,$src2\t! sub packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub8S_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (SubVS src (LoadVector mem)));
|
|
format %{ "vpsubw $dst,$src,$mem\t! sub packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub16S_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (SubVS src1 src2));
|
|
format %{ "vpsubw $dst,$src1,$src2\t! sub packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsubw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub16S_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (SubVS src (LoadVector mem)));
|
|
format %{ "vpsubw $dst,$src,$mem\t! sub packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsubw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// Integers vector sub
|
|
instruct vsub2I(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVI dst src));
|
|
format %{ "psubd $dst,$src\t! sub packed2I" %}
|
|
ins_encode %{
|
|
__ psubd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub2I_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVI src1 src2));
|
|
format %{ "vpsubd $dst,$src1,$src2\t! sub packed2I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4I(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVI dst src));
|
|
format %{ "psubd $dst,$src\t! sub packed4I" %}
|
|
ins_encode %{
|
|
__ psubd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVI src1 src2));
|
|
format %{ "vpsubd $dst,$src1,$src2\t! sub packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4I_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVI src (LoadVector mem)));
|
|
format %{ "vpsubd $dst,$src,$mem\t! sub packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub8I_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (SubVI src1 src2));
|
|
format %{ "vpsubd $dst,$src1,$src2\t! sub packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsubd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub8I_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (SubVI src (LoadVector mem)));
|
|
format %{ "vpsubd $dst,$src,$mem\t! sub packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsubd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// Longs vector sub
|
|
instruct vsub2L(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVL dst src));
|
|
format %{ "psubq $dst,$src\t! sub packed2L" %}
|
|
ins_encode %{
|
|
__ psubq($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVL src1 src2));
|
|
format %{ "vpsubq $dst,$src1,$src2\t! sub packed2L" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub2L_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVL src (LoadVector mem)));
|
|
format %{ "vpsubq $dst,$src,$mem\t! sub packed2L" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4L_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVL src1 src2));
|
|
format %{ "vpsubq $dst,$src1,$src2\t! sub packed4L" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsubq($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4L_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVL src (LoadVector mem)));
|
|
format %{ "vpsubq $dst,$src,$mem\t! sub packed4L" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsubq($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// Floats vector sub
|
|
instruct vsub2F(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVF dst src));
|
|
format %{ "subps $dst,$src\t! sub packed2F" %}
|
|
ins_encode %{
|
|
__ subps($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub2F_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVF src1 src2));
|
|
format %{ "vsubps $dst,$src1,$src2\t! sub packed2F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4F(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVF dst src));
|
|
format %{ "subps $dst,$src\t! sub packed4F" %}
|
|
ins_encode %{
|
|
__ subps($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVF src1 src2));
|
|
format %{ "vsubps $dst,$src1,$src2\t! sub packed4F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4F_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVF src (LoadVector mem)));
|
|
format %{ "vsubps $dst,$src,$mem\t! sub packed4F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub8F_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (SubVF src1 src2));
|
|
format %{ "vsubps $dst,$src1,$src2\t! sub packed8F" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vsubps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub8F_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (SubVF src (LoadVector mem)));
|
|
format %{ "vsubps $dst,$src,$mem\t! sub packed8F" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vsubps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
// Doubles vector sub
|
|
instruct vsub2D(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVD dst src));
|
|
format %{ "subpd $dst,$src\t! sub packed2D" %}
|
|
ins_encode %{
|
|
__ subpd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub2D_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVD src1 src2));
|
|
format %{ "vsubpd $dst,$src1,$src2\t! sub packed2D" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub2D_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (SubVD src (LoadVector mem)));
|
|
format %{ "vsubpd $dst,$src,$mem\t! sub packed2D" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4D_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVD src1 src2));
|
|
format %{ "vsubpd $dst,$src1,$src2\t! sub packed4D" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vsubpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsub4D_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (SubVD src (LoadVector mem)));
|
|
format %{ "vsubpd $dst,$src,$mem\t! sub packed4D" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vsubpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// --------------------------------- MUL --------------------------------------

// Shorts/Chars vector mul
instruct vmul2S(vecS dst, vecS src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (MulVS dst src));
|
|
format %{ "pmullw $dst,$src\t! mul packed2S" %}
|
|
ins_encode %{
|
|
__ pmullw($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul2S_reg(vecS dst, vecS src1, vecS src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (MulVS src1 src2));
|
|
format %{ "vpmullw $dst,$src1,$src2\t! mul packed2S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4S(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVS dst src));
|
|
format %{ "pmullw $dst,$src\t! mul packed4S" %}
|
|
ins_encode %{
|
|
__ pmullw($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4S_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVS src1 src2));
|
|
format %{ "vpmullw $dst,$src1,$src2\t! mul packed4S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul8S(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (MulVS dst src));
|
|
format %{ "pmullw $dst,$src\t! mul packed8S" %}
|
|
ins_encode %{
|
|
__ pmullw($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul8S_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (MulVS src1 src2));
|
|
format %{ "vpmullw $dst,$src1,$src2\t! mul packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul8S_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (MulVS src (LoadVector mem)));
|
|
format %{ "vpmullw $dst,$src,$mem\t! mul packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul16S_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (MulVS src1 src2));
|
|
format %{ "vpmullw $dst,$src1,$src2\t! mul packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpmullw($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul16S_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (MulVS src (LoadVector mem)));
|
|
format %{ "vpmullw $dst,$src,$mem\t! mul packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpmullw($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// Integers vector mul (sse4_1)
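// A minimal illustration (hypothetical Java source, assuming superword
// vectorization applies, not taken from this file): a loop such as
//   for (int i = 0; i < a.length; i++) { a[i] = a[i] * b[i]; }
// may be turned into MulVI nodes that match the rules below; pmulld/vpmulld
// need SSE4.1/AVX, hence the UseSSE > 3 and UseAVX predicates.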
instruct vmul2I(vecD dst, vecD src) %{
  predicate(UseSSE > 3 && n->as_Vector()->length() == 2);
  match(Set dst (MulVI dst src));
  format %{ "pmulld $dst,$src\t! mul packed2I" %}
  ins_encode %{
    __ pmulld($dst$$XMMRegister, $src$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
|
|
|
instruct vmul2I_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (MulVI src1 src2));
|
|
format %{ "vpmulld $dst,$src1,$src2\t! mul packed2I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4I(vecX dst, vecX src) %{
|
|
predicate(UseSSE > 3 && n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVI dst src));
|
|
format %{ "pmulld $dst,$src\t! mul packed4I" %}
|
|
ins_encode %{
|
|
__ pmulld($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVI src1 src2));
|
|
format %{ "vpmulld $dst,$src1,$src2\t! mul packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4I_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVI src (LoadVector mem)));
|
|
format %{ "vpmulld $dst,$src,$mem\t! mul packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul8I_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (MulVI src1 src2));
|
|
format %{ "vpmulld $dst,$src1,$src2\t! mul packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpmulld($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul8I_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (MulVI src (LoadVector mem)));
|
|
format %{ "vpmulld $dst,$src,$mem\t! mul packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpmulld($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// Floats vector mul
instruct vmul2F(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (MulVF dst src));
|
|
format %{ "mulps $dst,$src\t! mul packed2F" %}
|
|
ins_encode %{
|
|
__ mulps($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul2F_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (MulVF src1 src2));
|
|
format %{ "vmulps $dst,$src1,$src2\t! mul packed2F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4F(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVF dst src));
|
|
format %{ "mulps $dst,$src\t! mul packed4F" %}
|
|
ins_encode %{
|
|
__ mulps($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVF src1 src2));
|
|
format %{ "vmulps $dst,$src1,$src2\t! mul packed4F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4F_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVF src (LoadVector mem)));
|
|
format %{ "vmulps $dst,$src,$mem\t! mul packed4F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul8F_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (MulVF src1 src2));
|
|
format %{ "vmulps $dst,$src1,$src2\t! mul packed8F" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vmulps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul8F_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (MulVF src (LoadVector mem)));
|
|
format %{ "vmulps $dst,$src,$mem\t! mul packed8F" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vmulps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// Doubles vector mul
instruct vmul2D(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (MulVD dst src));
|
|
format %{ "mulpd $dst,$src\t! mul packed2D" %}
|
|
ins_encode %{
|
|
__ mulpd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul2D_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (MulVD src1 src2));
|
|
format %{ "vmulpd $dst,$src1,$src2\t! mul packed2D" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul2D_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (MulVD src (LoadVector mem)));
|
|
format %{ "vmulpd $dst,$src,$mem\t! mul packed2D" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4D_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVD src1 src2));
|
|
format %{ "vmulpd $dst,$src1,$src2\t! mul packed4D" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vmulpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vmul4D_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (MulVD src (LoadVector mem)));
|
|
format %{ "vmulpd $dst,$src,$mem\t! mul packed4D" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vmulpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// --------------------------------- DIV --------------------------------------

// Floats vector div
instruct vdiv2F(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (DivVF dst src));
|
|
format %{ "divps $dst,$src\t! div packed2F" %}
|
|
ins_encode %{
|
|
__ divps($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv2F_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (DivVF src1 src2));
|
|
format %{ "vdivps $dst,$src1,$src2\t! div packed2F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv4F(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (DivVF dst src));
|
|
format %{ "divps $dst,$src\t! div packed4F" %}
|
|
ins_encode %{
|
|
__ divps($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv4F_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (DivVF src1 src2));
|
|
format %{ "vdivps $dst,$src1,$src2\t! div packed4F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv4F_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (DivVF src (LoadVector mem)));
|
|
format %{ "vdivps $dst,$src,$mem\t! div packed4F" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv8F_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (DivVF src1 src2));
|
|
format %{ "vdivps $dst,$src1,$src2\t! div packed8F" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vdivps($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv8F_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (DivVF src (LoadVector mem)));
|
|
format %{ "vdivps $dst,$src,$mem\t! div packed8F" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vdivps($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// Doubles vector div
instruct vdiv2D(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (DivVD dst src));
|
|
format %{ "divpd $dst,$src\t! div packed2D" %}
|
|
ins_encode %{
|
|
__ divpd($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv2D_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (DivVD src1 src2));
|
|
format %{ "vdivpd $dst,$src1,$src2\t! div packed2D" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv2D_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (DivVD src (LoadVector mem)));
|
|
format %{ "vdivpd $dst,$src,$mem\t! div packed2D" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv4D_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (DivVD src1 src2));
|
|
format %{ "vdivpd $dst,$src1,$src2\t! div packed4D" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vdivpd($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vdiv4D_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (DivVD src (LoadVector mem)));
|
|
format %{ "vdivpd $dst,$src,$mem\t! div packed4D" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vdivpd($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// ------------------------------ Shift ---------------------------------------

// Left and right shift count vectors are the same on x86
// (only lowest bits of xmm reg are used for count).
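// A minimal illustration (hypothetical Java source, not taken from this file):
//   for (int i = 0; i < a.length; i++) { b[i] = a[i] << s; c[i] = a[i] >> s; }
// both shift directions can reuse one vshiftcnt result, because the psll*,
// psrl* and psra* forms below all read the count from the low bits of the
// same xmm register.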
instruct vshiftcnt(vecS dst, rRegI cnt) %{
  match(Set dst (LShiftCntV cnt));
  match(Set dst (RShiftCntV cnt));
  format %{ "movd $dst,$cnt\t! load shift count" %}
  ins_encode %{
    __ movdl($dst$$XMMRegister, $cnt$$Register);
  %}
  ins_pipe( pipe_slow );
%}

// ------------------------------ LeftShift -----------------------------------

// Shorts/Chars vector left shift
instruct vsll2S(vecS dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVS dst shift));
|
|
format %{ "psllw $dst,$shift\t! left shift packed2S" %}
|
|
ins_encode %{
|
|
__ psllw($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll2S_imm(vecS dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVS dst shift));
|
|
format %{ "psllw $dst,$shift\t! left shift packed2S" %}
|
|
ins_encode %{
|
|
__ psllw($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll2S_reg(vecS dst, vecS src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVS src shift));
|
|
format %{ "vpsllw $dst,$src,$shift\t! left shift packed2S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll2S_reg_imm(vecS dst, vecS src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVS src shift));
|
|
format %{ "vpsllw $dst,$src,$shift\t! left shift packed2S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4S(vecD dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVS dst shift));
|
|
format %{ "psllw $dst,$shift\t! left shift packed4S" %}
|
|
ins_encode %{
|
|
__ psllw($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4S_imm(vecD dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVS dst shift));
|
|
format %{ "psllw $dst,$shift\t! left shift packed4S" %}
|
|
ins_encode %{
|
|
__ psllw($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4S_reg(vecD dst, vecD src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVS src shift));
|
|
format %{ "vpsllw $dst,$src,$shift\t! left shift packed4S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4S_reg_imm(vecD dst, vecD src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVS src shift));
|
|
format %{ "vpsllw $dst,$src,$shift\t! left shift packed4S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll8S(vecX dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (LShiftVS dst shift));
|
|
format %{ "psllw $dst,$shift\t! left shift packed8S" %}
|
|
ins_encode %{
|
|
__ psllw($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll8S_imm(vecX dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (LShiftVS dst shift));
|
|
format %{ "psllw $dst,$shift\t! left shift packed8S" %}
|
|
ins_encode %{
|
|
__ psllw($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll8S_reg(vecX dst, vecX src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (LShiftVS src shift));
|
|
format %{ "vpsllw $dst,$src,$shift\t! left shift packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll8S_reg_imm(vecX dst, vecX src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (LShiftVS src shift));
|
|
format %{ "vpsllw $dst,$src,$shift\t! left shift packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll16S_reg(vecY dst, vecY src, vecS shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (LShiftVS src shift));
|
|
format %{ "vpsllw $dst,$src,$shift\t! left shift packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsllw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll16S_reg_imm(vecY dst, vecY src, immI8 shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (LShiftVS src shift));
|
|
format %{ "vpsllw $dst,$src,$shift\t! left shift packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsllw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// Integers vector left shift
instruct vsll2I(vecD dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVI dst shift));
|
|
format %{ "pslld $dst,$shift\t! left shift packed2I" %}
|
|
ins_encode %{
|
|
__ pslld($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll2I_imm(vecD dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVI dst shift));
|
|
format %{ "pslld $dst,$shift\t! left shift packed2I" %}
|
|
ins_encode %{
|
|
__ pslld($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll2I_reg(vecD dst, vecD src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVI src shift));
|
|
format %{ "vpslld $dst,$src,$shift\t! left shift packed2I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll2I_reg_imm(vecD dst, vecD src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVI src shift));
|
|
format %{ "vpslld $dst,$src,$shift\t! left shift packed2I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4I(vecX dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVI dst shift));
|
|
format %{ "pslld $dst,$shift\t! left shift packed4I" %}
|
|
ins_encode %{
|
|
__ pslld($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4I_imm(vecX dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVI dst shift));
|
|
format %{ "pslld $dst,$shift\t! left shift packed4I" %}
|
|
ins_encode %{
|
|
__ pslld($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4I_reg(vecX dst, vecX src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVI src shift));
|
|
format %{ "vpslld $dst,$src,$shift\t! left shift packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4I_reg_imm(vecX dst, vecX src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVI src shift));
|
|
format %{ "vpslld $dst,$src,$shift\t! left shift packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll8I_reg(vecY dst, vecY src, vecS shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (LShiftVI src shift));
|
|
format %{ "vpslld $dst,$src,$shift\t! left shift packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpslld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll8I_reg_imm(vecY dst, vecY src, immI8 shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (LShiftVI src shift));
|
|
format %{ "vpslld $dst,$src,$shift\t! left shift packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpslld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// Longs vector left shift
instruct vsll2L(vecX dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVL dst shift));
|
|
format %{ "psllq $dst,$shift\t! left shift packed2L" %}
|
|
ins_encode %{
|
|
__ psllq($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll2L_imm(vecX dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVL dst shift));
|
|
format %{ "psllq $dst,$shift\t! left shift packed2L" %}
|
|
ins_encode %{
|
|
__ psllq($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll2L_reg(vecX dst, vecX src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVL src shift));
|
|
format %{ "vpsllq $dst,$src,$shift\t! left shift packed2L" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsllq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll2L_reg_imm(vecX dst, vecX src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (LShiftVL src shift));
|
|
format %{ "vpsllq $dst,$src,$shift\t! left shift packed2L" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsllq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4L_reg(vecY dst, vecY src, vecS shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVL src shift));
|
|
format %{ "vpsllq $dst,$src,$shift\t! left shift packed4L" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsllq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsll4L_reg_imm(vecY dst, vecY src, immI8 shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
|
|
match(Set dst (LShiftVL src shift));
|
|
format %{ "vpsllq $dst,$src,$shift\t! left shift packed4L" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsllq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// ----------------------- LogicalRightShift -----------------------------------

// Shorts vector logical right shift produces an incorrect Java result
// for negative data because Java code converts the short value into an int with
// sign extension before the shift. But char vectors are fine since chars are
// unsigned values.
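//
// A worked example (hypothetical Java source, not taken from this file):
//   short s = (short)0x8000;       // -32768
//   short r = (short)(s >>> 2);    // s widens to int 0xFFFF8000; 0xFFFF8000 >>> 2
//                                  // is 0x3FFFE000, narrowed to (short)0xE000 = -8192
// whereas a 16-bit packed psrlw by 2 on 0x8000 yields 0x2000 = 8192, so a
// vectorized psrlw would not match the scalar Java result for negative shorts.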
instruct vsrl2S(vecS dst, vecS shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (URShiftVS dst shift));
  format %{ "psrlw $dst,$shift\t! logical right shift packed2S" %}
  ins_encode %{
    __ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
|
|
|
instruct vsrl2S_imm(vecS dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVS dst shift));
|
|
format %{ "psrlw $dst,$shift\t! logical right shift packed2S" %}
|
|
ins_encode %{
|
|
__ psrlw($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl2S_reg(vecS dst, vecS src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVS src shift));
|
|
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed2S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl2S_reg_imm(vecS dst, vecS src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVS src shift));
|
|
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed2S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4S(vecD dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVS dst shift));
|
|
format %{ "psrlw $dst,$shift\t! logical right shift packed4S" %}
|
|
ins_encode %{
|
|
__ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4S_imm(vecD dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVS dst shift));
|
|
format %{ "psrlw $dst,$shift\t! logical right shift packed4S" %}
|
|
ins_encode %{
|
|
__ psrlw($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4S_reg(vecD dst, vecD src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVS src shift));
|
|
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed4S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4S_reg_imm(vecD dst, vecD src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVS src shift));
|
|
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed4S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl8S(vecX dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (URShiftVS dst shift));
|
|
format %{ "psrlw $dst,$shift\t! logical right shift packed8S" %}
|
|
ins_encode %{
|
|
__ psrlw($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl8S_imm(vecX dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (URShiftVS dst shift));
|
|
format %{ "psrlw $dst,$shift\t! logical right shift packed8S" %}
|
|
ins_encode %{
|
|
__ psrlw($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl8S_reg(vecX dst, vecX src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (URShiftVS src shift));
|
|
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl8S_reg_imm(vecX dst, vecX src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (URShiftVS src shift));
|
|
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl16S_reg(vecY dst, vecY src, vecS shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (URShiftVS src shift));
|
|
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl16S_reg_imm(vecY dst, vecY src, immI8 shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (URShiftVS src shift));
|
|
format %{ "vpsrlw $dst,$src,$shift\t! logical right shift packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsrlw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// Integers vector logical right shift
instruct vsrl2I(vecD dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVI dst shift));
|
|
format %{ "psrld $dst,$shift\t! logical right shift packed2I" %}
|
|
ins_encode %{
|
|
__ psrld($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl2I_imm(vecD dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVI dst shift));
|
|
format %{ "psrld $dst,$shift\t! logical right shift packed2I" %}
|
|
ins_encode %{
|
|
__ psrld($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl2I_reg(vecD dst, vecD src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVI src shift));
|
|
format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed2I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl2I_reg_imm(vecD dst, vecD src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVI src shift));
|
|
format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed2I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4I(vecX dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVI dst shift));
|
|
format %{ "psrld $dst,$shift\t! logical right shift packed4I" %}
|
|
ins_encode %{
|
|
__ psrld($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4I_imm(vecX dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVI dst shift));
|
|
format %{ "psrld $dst,$shift\t! logical right shift packed4I" %}
|
|
ins_encode %{
|
|
__ psrld($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4I_reg(vecX dst, vecX src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVI src shift));
|
|
format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4I_reg_imm(vecX dst, vecX src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVI src shift));
|
|
format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl8I_reg(vecY dst, vecY src, vecS shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (URShiftVI src shift));
|
|
format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsrld($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl8I_reg_imm(vecY dst, vecY src, immI8 shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (URShiftVI src shift));
|
|
format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsrld($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// Longs vector logical right shift
instruct vsrl2L(vecX dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVL dst shift));
|
|
format %{ "psrlq $dst,$shift\t! logical right shift packed2L" %}
|
|
ins_encode %{
|
|
__ psrlq($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl2L_imm(vecX dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVL dst shift));
|
|
format %{ "psrlq $dst,$shift\t! logical right shift packed2L" %}
|
|
ins_encode %{
|
|
__ psrlq($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl2L_reg(vecX dst, vecX src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVL src shift));
|
|
format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed2L" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl2L_reg_imm(vecX dst, vecX src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (URShiftVL src shift));
|
|
format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed2L" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4L_reg(vecY dst, vecY src, vecS shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVL src shift));
|
|
format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed4L" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsrl4L_reg_imm(vecY dst, vecY src, immI8 shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
|
|
match(Set dst (URShiftVL src shift));
|
|
format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed4L" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsrlq($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// ------------------- ArithmeticRightShift -----------------------------------

// Shorts/Chars vector arithmetic right shift
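// Unlike the logical right shift above, the arithmetic form is safe for shorts:
// e.g. for s = (short)0x8000, (short)(s >> 2) is (short)0xE000 = -8192 in Java,
// which is exactly what a 16-bit psraw by 2 produces, so no special handling
// is needed here.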
instruct vsra2S(vecS dst, vecS shift) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (RShiftVS dst shift));
  format %{ "psraw $dst,$shift\t! arithmetic right shift packed2S" %}
  ins_encode %{
    __ psraw($dst$$XMMRegister, $shift$$XMMRegister);
  %}
  ins_pipe( pipe_slow );
%}
|
|
|
|
instruct vsra2S_imm(vecS dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (RShiftVS dst shift));
|
|
format %{ "psraw $dst,$shift\t! arithmetic right shift packed2S" %}
|
|
ins_encode %{
|
|
__ psraw($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra2S_reg(vecS dst, vecS src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (RShiftVS src shift));
|
|
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed2S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra2S_reg_imm(vecS dst, vecS src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (RShiftVS src shift));
|
|
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed2S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra4S(vecD dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (RShiftVS dst shift));
|
|
format %{ "psraw $dst,$shift\t! arithmetic right shift packed4S" %}
|
|
ins_encode %{
|
|
__ psraw($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra4S_imm(vecD dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (RShiftVS dst shift));
|
|
format %{ "psraw $dst,$shift\t! arithmetic right shift packed4S" %}
|
|
ins_encode %{
|
|
__ psraw($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra4S_reg(vecD dst, vecD src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (RShiftVS src shift));
|
|
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed4S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra4S_reg_imm(vecD dst, vecD src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (RShiftVS src shift));
|
|
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed4S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra8S(vecX dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (RShiftVS dst shift));
|
|
format %{ "psraw $dst,$shift\t! arithmetic right shift packed8S" %}
|
|
ins_encode %{
|
|
__ psraw($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra8S_imm(vecX dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 8);
|
|
match(Set dst (RShiftVS dst shift));
|
|
format %{ "psraw $dst,$shift\t! arithmetic right shift packed8S" %}
|
|
ins_encode %{
|
|
__ psraw($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra8S_reg(vecX dst, vecX src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (RShiftVS src shift));
|
|
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra8S_reg_imm(vecX dst, vecX src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
|
|
match(Set dst (RShiftVS src shift));
|
|
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed8S" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra16S_reg(vecY dst, vecY src, vecS shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (RShiftVS src shift));
|
|
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsraw($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra16S_reg_imm(vecY dst, vecY src, immI8 shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
|
|
match(Set dst (RShiftVS src shift));
|
|
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed16S" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsraw($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// Integers vector arithmetic right shift
instruct vsra2I(vecD dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (RShiftVI dst shift));
|
|
format %{ "psrad $dst,$shift\t! arithmetic right shift packed2I" %}
|
|
ins_encode %{
|
|
__ psrad($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra2I_imm(vecD dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 2);
|
|
match(Set dst (RShiftVI dst shift));
|
|
format %{ "psrad $dst,$shift\t! arithmetic right shift packed2I" %}
|
|
ins_encode %{
|
|
__ psrad($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra2I_reg(vecD dst, vecD src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (RShiftVI src shift));
|
|
format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed2I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra2I_reg_imm(vecD dst, vecD src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
|
|
match(Set dst (RShiftVI src shift));
|
|
format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed2I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra4I(vecX dst, vecS shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (RShiftVI dst shift));
|
|
format %{ "psrad $dst,$shift\t! arithmetic right shift packed4I" %}
|
|
ins_encode %{
|
|
__ psrad($dst$$XMMRegister, $shift$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra4I_imm(vecX dst, immI8 shift) %{
|
|
predicate(n->as_Vector()->length() == 4);
|
|
match(Set dst (RShiftVI dst shift));
|
|
format %{ "psrad $dst,$shift\t! arithmetic right shift packed4I" %}
|
|
ins_encode %{
|
|
__ psrad($dst$$XMMRegister, (int)$shift$$constant);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra4I_reg(vecX dst, vecX src, vecS shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (RShiftVI src shift));
|
|
format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra4I_reg_imm(vecX dst, vecX src, immI8 shift) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
|
|
match(Set dst (RShiftVI src shift));
|
|
format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed4I" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra8I_reg(vecY dst, vecY src, vecS shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (RShiftVI src shift));
|
|
format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsrad($dst$$XMMRegister, $src$$XMMRegister, $shift$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vsra8I_reg_imm(vecY dst, vecY src, immI8 shift) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
|
|
match(Set dst (RShiftVI src shift));
|
|
format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed8I" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpsrad($dst$$XMMRegister, $src$$XMMRegister, (int)$shift$$constant, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// There are no vector arithmetic right shift instructions for longs.
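// (SSE/AVX at this level provide psraw/psrad but, as far as we know, no packed
// 64-bit arithmetic right shift, so e.g. a[i] >> 3 over a long[] is expected
// to stay scalar.)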
// --------------------------------- AND --------------------------------------
instruct vand4B(vecS dst, vecS src) %{
|
|
predicate(n->as_Vector()->length_in_bytes() == 4);
|
|
match(Set dst (AndV dst src));
|
|
format %{ "pand $dst,$src\t! and vectors (4 bytes)" %}
|
|
ins_encode %{
|
|
__ pand($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vand4B_reg(vecS dst, vecS src1, vecS src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
|
|
match(Set dst (AndV src1 src2));
|
|
format %{ "vpand $dst,$src1,$src2\t! and vectors (4 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vand8B(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length_in_bytes() == 8);
|
|
match(Set dst (AndV dst src));
|
|
format %{ "pand $dst,$src\t! and vectors (8 bytes)" %}
|
|
ins_encode %{
|
|
__ pand($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vand8B_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
|
|
match(Set dst (AndV src1 src2));
|
|
format %{ "vpand $dst,$src1,$src2\t! and vectors (8 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vand16B(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length_in_bytes() == 16);
|
|
match(Set dst (AndV dst src));
|
|
format %{ "pand $dst,$src\t! and vectors (16 bytes)" %}
|
|
ins_encode %{
|
|
__ pand($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vand16B_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
|
|
match(Set dst (AndV src1 src2));
|
|
format %{ "vpand $dst,$src1,$src2\t! and vectors (16 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vand16B_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
|
|
match(Set dst (AndV src (LoadVector mem)));
|
|
format %{ "vpand $dst,$src,$mem\t! and vectors (16 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vand32B_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
|
|
match(Set dst (AndV src1 src2));
|
|
format %{ "vpand $dst,$src1,$src2\t! and vectors (32 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpand($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vand32B_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
|
|
match(Set dst (AndV src (LoadVector mem)));
|
|
format %{ "vpand $dst,$src,$mem\t! and vectors (32 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpand($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// --------------------------------- OR ---------------------------------------

instruct vor4B(vecS dst, vecS src) %{
|
|
predicate(n->as_Vector()->length_in_bytes() == 4);
|
|
match(Set dst (OrV dst src));
|
|
format %{ "por $dst,$src\t! or vectors (4 bytes)" %}
|
|
ins_encode %{
|
|
__ por($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vor4B_reg(vecS dst, vecS src1, vecS src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
|
|
match(Set dst (OrV src1 src2));
|
|
format %{ "vpor $dst,$src1,$src2\t! or vectors (4 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vor8B(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length_in_bytes() == 8);
|
|
match(Set dst (OrV dst src));
|
|
format %{ "por $dst,$src\t! or vectors (8 bytes)" %}
|
|
ins_encode %{
|
|
__ por($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vor8B_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
|
|
match(Set dst (OrV src1 src2));
|
|
format %{ "vpor $dst,$src1,$src2\t! or vectors (8 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vor16B(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length_in_bytes() == 16);
|
|
match(Set dst (OrV dst src));
|
|
format %{ "por $dst,$src\t! or vectors (16 bytes)" %}
|
|
ins_encode %{
|
|
__ por($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vor16B_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
|
|
match(Set dst (OrV src1 src2));
|
|
format %{ "vpor $dst,$src1,$src2\t! or vectors (16 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vor16B_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
|
|
match(Set dst (OrV src (LoadVector mem)));
|
|
format %{ "vpor $dst,$src,$mem\t! or vectors (16 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vor32B_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
|
|
match(Set dst (OrV src1 src2));
|
|
format %{ "vpor $dst,$src1,$src2\t! or vectors (32 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vor32B_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
|
|
match(Set dst (OrV src (LoadVector mem)));
|
|
format %{ "vpor $dst,$src,$mem\t! or vectors (32 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}

// --------------------------------- XOR --------------------------------------

instruct vxor4B(vecS dst, vecS src) %{
|
|
predicate(n->as_Vector()->length_in_bytes() == 4);
|
|
match(Set dst (XorV dst src));
|
|
format %{ "pxor $dst,$src\t! xor vectors (4 bytes)" %}
|
|
ins_encode %{
|
|
__ pxor($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vxor4B_reg(vecS dst, vecS src1, vecS src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 4);
|
|
match(Set dst (XorV src1 src2));
|
|
format %{ "vpxor $dst,$src1,$src2\t! xor vectors (4 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vxor8B(vecD dst, vecD src) %{
|
|
predicate(n->as_Vector()->length_in_bytes() == 8);
|
|
match(Set dst (XorV dst src));
|
|
format %{ "pxor $dst,$src\t! xor vectors (8 bytes)" %}
|
|
ins_encode %{
|
|
__ pxor($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vxor8B_reg(vecD dst, vecD src1, vecD src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 8);
|
|
match(Set dst (XorV src1 src2));
|
|
format %{ "vpxor $dst,$src1,$src2\t! xor vectors (8 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vxor16B(vecX dst, vecX src) %{
|
|
predicate(n->as_Vector()->length_in_bytes() == 16);
|
|
match(Set dst (XorV dst src));
|
|
format %{ "pxor $dst,$src\t! xor vectors (16 bytes)" %}
|
|
ins_encode %{
|
|
__ pxor($dst$$XMMRegister, $src$$XMMRegister);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vxor16B_reg(vecX dst, vecX src1, vecX src2) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
|
|
match(Set dst (XorV src1 src2));
|
|
format %{ "vpxor $dst,$src1,$src2\t! xor vectors (16 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vxor16B_mem(vecX dst, vecX src, memory mem) %{
|
|
predicate(UseAVX > 0 && n->as_Vector()->length_in_bytes() == 16);
|
|
match(Set dst (XorV src (LoadVector mem)));
|
|
format %{ "vpxor $dst,$src,$mem\t! xor vectors (16 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = false;
|
|
__ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vxor32B_reg(vecY dst, vecY src1, vecY src2) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
|
|
match(Set dst (XorV src1 src2));
|
|
format %{ "vpxor $dst,$src1,$src2\t! xor vectors (32 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpxor($dst$$XMMRegister, $src1$$XMMRegister, $src2$$XMMRegister, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|
|
instruct vxor32B_mem(vecY dst, vecY src, memory mem) %{
|
|
predicate(UseAVX > 1 && n->as_Vector()->length_in_bytes() == 32);
|
|
match(Set dst (XorV src (LoadVector mem)));
|
|
format %{ "vpxor $dst,$src,$mem\t! xor vectors (32 bytes)" %}
|
|
ins_encode %{
|
|
bool vector256 = true;
|
|
__ vpxor($dst$$XMMRegister, $src$$XMMRegister, $mem$$Address, vector256);
|
|
%}
|
|
ins_pipe( pipe_slow );
|
|
%}
|
|
|