2012-02-18 20:03:15 +08:00
|
|
|
//===-- X86InstrFormats.td - X86 Instruction Formats -------*- tablegen -*-===//
|
|
|
|
//
|
2007-07-31 16:04:03 +08:00
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-30 04:36:04 +08:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2012-02-18 20:03:15 +08:00
|
|
|
//
|
2007-07-31 16:04:03 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
// X86 Instruction Format Definitions.
//

// Format specifies the encoding used by the instruction. This is part of the
// ad-hoc solution used to emit machine instruction encodings by our machine
// code emitter.
class Format<bits<7> val> {
  bits<7> Value = val;
}

def Pseudo        : Format<0>;
def RawFrm        : Format<1>;
def AddRegFrm     : Format<2>;
def RawFrmMemOffs : Format<3>;
def RawFrmSrc     : Format<4>;
def RawFrmDst     : Format<5>;
def RawFrmDstSrc  : Format<6>;
def RawFrmImm8    : Format<7>;
def RawFrmImm16   : Format<8>;
def MRMDestMem     : Format<32>;
def MRMSrcMem      : Format<33>;
def MRMSrcMem4VOp3 : Format<34>;
def MRMSrcMemOp4   : Format<35>;
def MRMXm : Format<39>;
def MRM0m : Format<40>; def MRM1m : Format<41>; def MRM2m : Format<42>;
def MRM3m : Format<43>; def MRM4m : Format<44>; def MRM5m : Format<45>;
def MRM6m : Format<46>; def MRM7m : Format<47>;
def MRMDestReg     : Format<48>;
def MRMSrcReg      : Format<49>;
def MRMSrcReg4VOp3 : Format<50>;
def MRMSrcRegOp4   : Format<51>;
def MRMXr : Format<55>;
def MRM0r : Format<56>; def MRM1r : Format<57>; def MRM2r : Format<58>;
def MRM3r : Format<59>; def MRM4r : Format<60>; def MRM5r : Format<61>;
def MRM6r : Format<62>; def MRM7r : Format<63>;
// Fixed ModRM-byte forms: MRM_XX means the ModRM byte is the literal 0xXX.
def MRM_C0 : Format<64>; def MRM_C1 : Format<65>; def MRM_C2 : Format<66>;
def MRM_C3 : Format<67>; def MRM_C4 : Format<68>; def MRM_C5 : Format<69>;
def MRM_C6 : Format<70>; def MRM_C7 : Format<71>; def MRM_C8 : Format<72>;
def MRM_C9 : Format<73>; def MRM_CA : Format<74>; def MRM_CB : Format<75>;
def MRM_CC : Format<76>; def MRM_CD : Format<77>; def MRM_CE : Format<78>;
def MRM_CF : Format<79>; def MRM_D0 : Format<80>; def MRM_D1 : Format<81>;
def MRM_D2 : Format<82>; def MRM_D3 : Format<83>; def MRM_D4 : Format<84>;
def MRM_D5 : Format<85>; def MRM_D6 : Format<86>; def MRM_D7 : Format<87>;
def MRM_D8 : Format<88>; def MRM_D9 : Format<89>; def MRM_DA : Format<90>;
def MRM_DB : Format<91>; def MRM_DC : Format<92>; def MRM_DD : Format<93>;
def MRM_DE : Format<94>; def MRM_DF : Format<95>; def MRM_E0 : Format<96>;
def MRM_E1 : Format<97>; def MRM_E2 : Format<98>; def MRM_E3 : Format<99>;
def MRM_E4 : Format<100>; def MRM_E5 : Format<101>; def MRM_E6 : Format<102>;
def MRM_E7 : Format<103>; def MRM_E8 : Format<104>; def MRM_E9 : Format<105>;
def MRM_EA : Format<106>; def MRM_EB : Format<107>; def MRM_EC : Format<108>;
def MRM_ED : Format<109>; def MRM_EE : Format<110>; def MRM_EF : Format<111>;
def MRM_F0 : Format<112>; def MRM_F1 : Format<113>; def MRM_F2 : Format<114>;
def MRM_F3 : Format<115>; def MRM_F4 : Format<116>; def MRM_F5 : Format<117>;
def MRM_F6 : Format<118>; def MRM_F7 : Format<119>; def MRM_F8 : Format<120>;
def MRM_F9 : Format<121>; def MRM_FA : Format<122>; def MRM_FB : Format<123>;
def MRM_FC : Format<124>; def MRM_FD : Format<125>; def MRM_FE : Format<126>;
def MRM_FF : Format<127>;
|
2007-07-31 16:04:03 +08:00
|
|
|
|
|
|
|
// ImmType - This specifies the immediate type used by an instruction. This is
// part of the ad-hoc solution used to emit machine instruction encodings by our
// machine code emitter.
class ImmType<bits<4> val> {
  bits<4> Value = val;
}
def NoImm      : ImmType<0>;
def Imm8       : ImmType<1>;
def Imm8PCRel  : ImmType<2>;
def Imm8Reg    : ImmType<3>; // Register encoded in [7:4].
def Imm16      : ImmType<4>;
def Imm16PCRel : ImmType<5>;
def Imm32      : ImmType<6>;
def Imm32PCRel : ImmType<7>;
def Imm32S     : ImmType<8>;
def Imm64      : ImmType<9>;
|
2007-07-31 16:04:03 +08:00
|
|
|
|
|
|
|
// FPFormat - This specifies what form this FP instruction has. This is used by
// the Floating-Point stackifier pass.
class FPFormat<bits<3> val> {
  bits<3> Value = val;
}
def NotFP      : FPFormat<0>;
def ZeroArgFP  : FPFormat<1>;
def OneArgFP   : FPFormat<2>;
def OneArgFPRW : FPFormat<3>;
def TwoArgFP   : FPFormat<4>;
def CompareFP  : FPFormat<5>;
def CondMovFP  : FPFormat<6>;
def SpecialFP  : FPFormat<7>;
|
|
|
|
|
2010-03-26 01:25:00 +08:00
|
|
|
// Class specifying the SSE execution domain, used by the SSEDomainFix pass.
// Keep in sync with tables in X86InstrInfo.cpp.
class Domain<bits<2> val> {
  bits<2> Value = val;
}
def GenericDomain   : Domain<0>;
def SSEPackedSingle : Domain<1>;
def SSEPackedDouble : Domain<2>;
def SSEPackedInt    : Domain<3>;
|
2010-03-26 01:25:00 +08:00
|
|
|
|
2013-07-28 16:28:38 +08:00
|
|
|
// Class specifying the vector form of the decompressed
// displacement of 8-bit.
class CD8VForm<bits<3> val> {
  bits<3> Value = val;
}
def CD8VF : CD8VForm<0>;  // v := VL
def CD8VH : CD8VForm<1>;  // v := VL/2
def CD8VQ : CD8VForm<2>;  // v := VL/4
def CD8VO : CD8VForm<3>;  // v := VL/8
// The tuple (subvector) forms.
def CD8VT1 : CD8VForm<4>; // v := 1
def CD8VT2 : CD8VForm<5>; // v := 2
def CD8VT4 : CD8VForm<6>; // v := 4
def CD8VT8 : CD8VForm<7>; // v := 8
|
|
|
|
|
2014-01-31 16:47:06 +08:00
|
|
|
// Class specifying the prefix used by an opcode extension.
class Prefix<bits<3> val> {
  bits<3> Value = val;
}
def NoPrfx : Prefix<0>;
def PD     : Prefix<1>;
def XS     : Prefix<2>;
def XD     : Prefix<3>;
def PS     : Prefix<4>; // Similar to NoPrfx, but disassembler uses this to know
                        // that other instructions with this opcode use PD/XS/XD
                        // and if any of those is not supported they shouldn't
                        // decode to this instruction. e.g. ANDSS/ANDSD don't
                        // exist, but the 0xf2/0xf3 encoding shouldn't
                        // disable to ANDPS.
|
2014-01-31 16:47:06 +08:00
|
|
|
|
|
|
|
// Class specifying the opcode map.
class Map<bits<3> val> {
  bits<3> Value = val;
}
def OB        : Map<0>;
def TB        : Map<1>;
def T8        : Map<2>;
def TA        : Map<3>;
def XOP8      : Map<4>;
def XOP9      : Map<5>;
def XOPA      : Map<6>;
def ThreeDNow : Map<7>;
|
2014-01-31 16:47:06 +08:00
|
|
|
|
2014-02-02 15:08:01 +08:00
|
|
|
// Class specifying the encoding
class Encoding<bits<2> val> {
  bits<2> Value = val;
}
def EncNormal : Encoding<0>;
def EncVEX    : Encoding<1>;
def EncXOP    : Encoding<2>;
def EncEVEX   : Encoding<3>;
|
|
|
|
|
2014-02-02 17:25:09 +08:00
|
|
|
// Operand size for encodings that change based on mode.
class OperandSize<bits<2> val> {
  bits<2> Value = val;
}
def OpSizeFixed  : OperandSize<0>; // Never needs a 0x66 prefix.
def OpSize16     : OperandSize<1>; // Needs 0x66 prefix in 32-bit mode.
def OpSize32     : OperandSize<2>; // Needs 0x66 prefix in 16-bit mode.
def OpSizeIgnore : OperandSize<3>; // Takes 0x66 prefix, never emits.
|
2014-02-02 17:25:09 +08:00
|
|
|
|
2014-12-24 14:05:22 +08:00
|
|
|
// Address size for encodings that change based on mode.
class AddressSize<bits<2> val> {
  bits<2> Value = val;
}
def AdSizeX  : AddressSize<0>; // Address size determined using addr operand.
def AdSize16 : AddressSize<1>; // Encodes a 16-bit address.
def AdSize32 : AddressSize<2>; // Encodes a 32-bit address.
def AdSize64 : AddressSize<3>; // Encodes a 64-bit address.
|
|
|
|
|
2007-07-31 16:04:03 +08:00
|
|
|
// Prefix byte classes which are used to indicate to the ad-hoc machine code
// emitter that various prefix bytes are required.
class OpSize16 { OperandSize OpSize = OpSize16; }
class OpSize32 { OperandSize OpSize = OpSize32; }
class OpSizeIgnore { OperandSize OpSize = OpSizeIgnore; }
class AdSize16 { AddressSize AdSize = AdSize16; }
class AdSize32 { AddressSize AdSize = AdSize32; }
class AdSize64 { AddressSize AdSize = AdSize64; }
class REX_W { bit hasREX_WPrefix = 1; }
class LOCK { bit hasLockPrefix = 1; }
class REP { bit hasREPPrefix = 1; }
// Opcode-map mixins.
class TB { Map OpMap = TB; }
class T8 { Map OpMap = T8; }
class TA { Map OpMap = TA; }
class XOP8 { Map OpMap = XOP8; Prefix OpPrefix = PS; }
class XOP9 { Map OpMap = XOP9; Prefix OpPrefix = PS; }
class XOPA { Map OpMap = XOPA; Prefix OpPrefix = PS; }
class ThreeDNow { Map OpMap = ThreeDNow; }
class OBXS { Prefix OpPrefix = XS; }
// Map + mandatory-prefix mixins.
class PS   : TB { Prefix OpPrefix = PS; }
class PD   : TB { Prefix OpPrefix = PD; }
class XD   : TB { Prefix OpPrefix = XD; }
class XS   : TB { Prefix OpPrefix = XS; }
class T8PS : T8 { Prefix OpPrefix = PS; }
class T8PD : T8 { Prefix OpPrefix = PD; }
class T8XD : T8 { Prefix OpPrefix = XD; }
class T8XS : T8 { Prefix OpPrefix = XS; }
class TAPS : TA { Prefix OpPrefix = PS; }
class TAPD : TA { Prefix OpPrefix = PD; }
class TAXD : TA { Prefix OpPrefix = XD; }
// VEX/EVEX/XOP encoding mixins.
class VEX     { Encoding OpEnc = EncVEX; }
class VEX_W   { bits<2> VEX_WPrefix = 1; }
class VEX_WIG { bits<2> VEX_WPrefix = 2; }
class VEX_4V  : VEX { bit hasVEX_4V = 1; }
class VEX_L   { bit hasVEX_L = 1; }
class VEX_LIG { bit ignoresVEX_L = 1; }
class EVEX    { Encoding OpEnc = EncEVEX; }
class EVEX_4V : EVEX { bit hasVEX_4V = 1; }
class EVEX_K  { bit hasEVEX_K = 1; }
class EVEX_KZ : EVEX_K { bit hasEVEX_Z = 1; }
class EVEX_B  { bit hasEVEX_B = 1; }
class EVEX_RC { bit hasEVEX_RC = 1; }
class EVEX_V512 { bit hasEVEX_L2 = 1; bit hasVEX_L = 0; }
class EVEX_V256 { bit hasEVEX_L2 = 0; bit hasVEX_L = 1; }
class EVEX_V128 { bit hasEVEX_L2 = 0; bit hasVEX_L = 0; }
class NOTRACK { bit hasNoTrackPrefix = 1; }

// Specify AVX512 8-bit compressed displacement encoding based on the vector
// element size in bits (8, 16, 32, 64) and the CDisp8 form.
class EVEX_CD8<int esize, CD8VForm form> {
  int CD8_EltSize = !srl(esize, 3); // element size in bytes
  bits<3> CD8_Form = form.Value;
}

class XOP { Encoding OpEnc = EncXOP; }
class XOP_4V : XOP { bit hasVEX_4V = 1; }

// Specify the alternative register form instruction to replace the current
// instruction in case it was picked during generation of memory folding tables
class FoldGenData<string _RegisterForm> {
  string FoldGenRegForm = _RegisterForm;
}

// Mark the instruction as "illegal to memory fold/unfold"
class NotMemoryFoldable { bit isMemoryFoldable = 0; }
|
|
|
|
|
2007-07-31 16:04:03 +08:00
|
|
|
// X86Inst - Common base class for all X86 instructions.  Encoding attributes
// set here are packed into TSFlags below for the MC layer to consume.
class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
              string AsmStr,
              InstrItinClass itin,
              Domain d = GenericDomain>
  : Instruction {
  let Namespace = "X86";

  bits<8> Opcode = opcod;
  Format Form = f;
  bits<7> FormBits = Form.Value;
  ImmType ImmT = i;

  dag OutOperandList = outs;
  dag InOperandList = ins;
  string AsmString = AsmStr;

  // If this is a pseudo instruction, mark it isCodeGenOnly.
  let isCodeGenOnly = !eq(!cast<string>(f), "Pseudo");

  let Itinerary = itin;

  //
  // Attributes specific to X86 instructions...
  //
  bit ForceDisassemble = 0; // Force instruction to disassemble even though it's
                            // isCodeGenonly. Needed to hide an ambiguous
                            // AsmString from the parser, but still disassemble.

  OperandSize OpSize = OpSizeFixed; // Does this instruction's encoding change
                                    // based on operand size of the mode?
  bits<2> OpSizeBits = OpSize.Value;
  AddressSize AdSize = AdSizeX; // Does this instruction's encoding change
                                // based on address size of the mode?
  bits<2> AdSizeBits = AdSize.Value;

  Prefix OpPrefix = NoPrfx; // Which prefix byte does this inst have?
  bits<3> OpPrefixBits = OpPrefix.Value;
  Map OpMap = OB;           // Which opcode map does this inst have?
  bits<3> OpMapBits = OpMap.Value;
  bit hasREX_WPrefix = 0;   // Does this inst require the REX.W prefix?
  FPFormat FPForm = NotFP;  // What flavor of FP instruction is this?
  bit hasLockPrefix = 0;    // Does this inst have a 0xF0 prefix?
  Domain ExeDomain = d;
  bit hasREPPrefix = 0;     // Does this inst have a REP prefix?
  Encoding OpEnc = EncNormal; // Encoding used by this instruction
  bits<2> OpEncBits = OpEnc.Value;
  bits<2> VEX_WPrefix = 0;  // Does this inst set the VEX_W field?
  bit hasVEX_4V = 0;        // Does this inst require the VEX.VVVV field?
  bit hasVEX_L = 0;         // Does this inst use large (256-bit) registers?
  bit ignoresVEX_L = 0;     // Does this instruction ignore the L-bit
  bit hasEVEX_K = 0;        // Does this inst require masking?
  bit hasEVEX_Z = 0;        // Does this inst set the EVEX_Z field?
  bit hasEVEX_L2 = 0;       // Does this inst set the EVEX_L2 field?
  bit hasEVEX_B = 0;        // Does this inst set the EVEX_B field?
  bits<3> CD8_Form = 0;     // Compressed disp8 form - vector-width.
  // Declare it int rather than bits<4> so that all bits are defined when
  // assigning to bits<7>.
  int CD8_EltSize = 0;      // Compressed disp8 form - element-size in bytes.
  bit hasEVEX_RC = 0;       // Explicitly specified rounding control in FP instruction.
  bit hasNoTrackPrefix = 0; // Does this inst have 0x3E (NoTrack) prefix?

  bits<2> EVEX_LL;
  let EVEX_LL{0} = hasVEX_L;
  let EVEX_LL{1} = hasEVEX_L2;
  // Vector size in bytes.
  bits<7> VectSize = !shl(16, EVEX_LL);

  // The scaling factor for AVX512's compressed displacement is either
  //  - the size of a power-of-two number of elements or
  //  - the size of a single element for broadcasts or
  //  - the total vector size divided by a power-of-two number.
  // Possible values are: 0 (non-AVX512 inst), 1, 2, 4, 8, 16, 32 and 64.
  bits<7> CD8_Scale = !if (!eq (OpEnc.Value, EncEVEX.Value),
                           !if (CD8_Form{2},
                                !shl(CD8_EltSize, CD8_Form{1-0}),
                                !if (hasEVEX_B,
                                     CD8_EltSize,
                                     !srl(VectSize, CD8_Form{1-0}))), 0);

  // Used in the memory folding generation (TableGen backend) to point to an alternative
  // instruction to replace the current one in case it got picked during generation.
  string FoldGenRegForm = ?;

  bit isMemoryFoldable = 1; // Is it allowed to memory fold/unfold this instruction?

  // TSFlags layout should be kept in sync with X86BaseInfo.h.
  let TSFlags{6-0}   = FormBits;
  let TSFlags{8-7}   = OpSizeBits;
  let TSFlags{10-9}  = AdSizeBits;
  // No need for 3rd bit, we don't need to distinguish NoPrfx from PS.
  let TSFlags{12-11} = OpPrefixBits{1-0};
  let TSFlags{15-13} = OpMapBits;
  let TSFlags{16}    = hasREX_WPrefix;
  let TSFlags{20-17} = ImmT.Value;
  let TSFlags{23-21} = FPForm.Value;
  let TSFlags{24}    = hasLockPrefix;
  let TSFlags{25}    = hasREPPrefix;
  let TSFlags{27-26} = ExeDomain.Value;
  let TSFlags{29-28} = OpEncBits;
  let TSFlags{37-30} = Opcode;
  // Currently no need for second bit in TSFlags - W Ignore is equivalent to 0.
  let TSFlags{38}    = VEX_WPrefix{0};
  let TSFlags{39}    = hasVEX_4V;
  let TSFlags{40}    = hasVEX_L;
  let TSFlags{41}    = hasEVEX_K;
  let TSFlags{42}    = hasEVEX_Z;
  let TSFlags{43}    = hasEVEX_L2;
  let TSFlags{44}    = hasEVEX_B;
  // If we run out of TSFlags bits, it's possible to encode this in 3 bits.
  let TSFlags{51-45} = CD8_Scale;
  let TSFlags{52}    = hasEVEX_RC;
  let TSFlags{53}    = hasNoTrackPrefix;
}
|
|
|
|
|
2017-12-23 18:47:21 +08:00
|
|
|
// PseudoI - A codegen-only pseudo instruction (no real encoding).
class PseudoI<dag oops, dag iops, list<dag> pattern,
              InstrItinClass itin = NoItinerary>
  : X86Inst<0, Pseudo, NoImm, oops, iops, "", itin> {
  let Pattern = pattern;
}
|
|
|
|
|
2010-03-26 01:25:00 +08:00
|
|
|
// I - Basic X86 instruction template, no immediate operand.
class I<bits<8> o, Format f, dag outs, dag ins, string asm,
        list<dag> pattern, InstrItinClass itin = NoItinerary,
        Domain d = GenericDomain>
  : X86Inst<o, f, NoImm, outs, ins, asm, itin, d> {
  let Pattern = pattern;
  let CodeSize = 3;
}

// Ii8 - Instruction with an 8-bit immediate.
class Ii8 <bits<8> o, Format f, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary,
           Domain d = GenericDomain>
  : X86Inst<o, f, Imm8, outs, ins, asm, itin, d> {
  let Pattern = pattern;
  let CodeSize = 3;
}

// Ii8Reg - Instruction with an 8-bit immediate whose high nibble encodes a
// register (see Imm8Reg).
class Ii8Reg<bits<8> o, Format f, dag outs, dag ins, string asm,
             list<dag> pattern, InstrItinClass itin = NoItinerary,
             Domain d = GenericDomain>
  : X86Inst<o, f, Imm8Reg, outs, ins, asm, itin, d> {
  let Pattern = pattern;
  let CodeSize = 3;
}

// Ii8PCRel - Instruction with an 8-bit PC-relative immediate.
class Ii8PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
               list<dag> pattern, InstrItinClass itin = NoItinerary>
  : X86Inst<o, f, Imm8PCRel, outs, ins, asm, itin> {
  let Pattern = pattern;
  let CodeSize = 3;
}

// Ii16 - Instruction with a 16-bit immediate.
class Ii16<bits<8> o, Format f, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
  : X86Inst<o, f, Imm16, outs, ins, asm, itin> {
  let Pattern = pattern;
  let CodeSize = 3;
}

// Ii32 - Instruction with a 32-bit immediate.
class Ii32<bits<8> o, Format f, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
  : X86Inst<o, f, Imm32, outs, ins, asm, itin> {
  let Pattern = pattern;
  let CodeSize = 3;
}

// Ii32S - Instruction with a 32-bit sign-extended immediate.
class Ii32S<bits<8> o, Format f, dag outs, dag ins, string asm,
            list<dag> pattern, InstrItinClass itin = NoItinerary>
  : X86Inst<o, f, Imm32S, outs, ins, asm, itin> {
  let Pattern = pattern;
  let CodeSize = 3;
}

// Ii64 - Instruction with a 64-bit immediate.
class Ii64<bits<8> o, Format f, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
  : X86Inst<o, f, Imm64, outs, ins, asm, itin> {
  let Pattern = pattern;
  let CodeSize = 3;
}
|
|
|
|
|
2014-12-04 13:20:33 +08:00
|
|
|
// Ii16PCRel - Instruction with a 16-bit PC-relative immediate.
class Ii16PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
                list<dag> pattern, InstrItinClass itin = NoItinerary>
  : X86Inst<o, f, Imm16PCRel, outs, ins, asm, itin> {
  let Pattern = pattern;
  let CodeSize = 3;
}

// Ii32PCRel - Instruction with a 32-bit PC-relative immediate.
class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm,
                list<dag> pattern, InstrItinClass itin = NoItinerary>
  : X86Inst<o, f, Imm32PCRel, outs, ins, asm, itin> {
  let Pattern = pattern;
  let CodeSize = 3;
}
|
|
|
|
|
2007-07-31 16:04:03 +08:00
|
|
|
// FPStack Instruction Templates:
// FPI - Floating Point Instruction template.
class FPI<bits<8> o, Format F, dag outs, dag ins, string asm>
  : I<o, F, outs, ins, asm, []> {}

// FpI_ - Floating Point Pseudo Instruction template. Not Predicated.
class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern>
  : PseudoI<outs, ins, pattern> {
  let FPForm = fp;
}
|
|
|
|
|
2009-09-15 08:35:17 +08:00
|
|
|
// Templates for instructions that use a 16- or 32-bit segmented address as
//  their only operand: lcall (FAR CALL) and ljmp (FAR JMP)
//
//   Iseg16 - 16-bit segment selector, 16-bit offset
//   Iseg32 - 16-bit segment selector, 32-bit offset

class Iseg16 <bits<8> o, Format f, dag outs, dag ins, string asm,
              list<dag> pattern, InstrItinClass itin = NoItinerary>
      : X86Inst<o, f, Imm16, outs, ins, asm, itin> {
  let Pattern = pattern;
  let CodeSize = 3;
}

class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm,
              list<dag> pattern, InstrItinClass itin = NoItinerary>
      : X86Inst<o, f, Imm32, outs, ins, asm, itin> {
  let Pattern = pattern;
  let CodeSize = 3;
}
|
|
|
|
|
2010-06-18 07:05:30 +08:00
|
|
|
// SI - SSE 1 & 2 scalar instructions.  Predicates are selected from the
// encoding (EVEX/VEX) or, for legacy encodings, from the mandatory prefix.
class SI<bits<8> o, Format F, dag outs, dag ins, string asm,
         list<dag> pattern, InstrItinClass itin = NoItinerary,
         Domain d = GenericDomain>
  : I<o, F, outs, ins, asm, pattern, itin, d> {
  let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
                   !if(!eq(OpEnc.Value, EncVEX.Value), [UseAVX],
                   !if(!eq(OpPrefix.Value, XS.Value), [UseSSE1],
                   !if(!eq(OpPrefix.Value, XD.Value), [UseSSE2],
                   !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2],
                   [UseSSE1])))));

  // AVX instructions have a 'v' prefix in the mnemonic
  let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm),
                  !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm),
                  asm));
}
|
|
|
|
|
2015-05-21 22:01:32 +08:00
|
|
|
// SI_Int - SSE 1 & 2 scalar intrinsics - vex form available on AVX512
class SI_Int<bits<8> o, Format F, dag outs, dag ins, string asm,
             list<dag> pattern, InstrItinClass itin = NoItinerary,
             Domain d = GenericDomain>
  : I<o, F, outs, ins, asm, pattern, itin, d> {
  let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
                   !if(!eq(OpEnc.Value, EncVEX.Value), [UseAVX],
                   !if(!eq(OpPrefix.Value, XS.Value), [UseSSE1],
                   !if(!eq(OpPrefix.Value, XD.Value), [UseSSE2],
                   !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2],
                   [UseSSE1])))));

  // AVX instructions have a 'v' prefix in the mnemonic
  let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm),
                  !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm),
                  asm));
}
|
|
|
|
// SIi8 - SSE 1 & 2 scalar instructions - vex form available on AVX512
class SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
  : Ii8<o, F, outs, ins, asm, pattern, itin> {
  let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
                   !if(!eq(OpEnc.Value, EncVEX.Value), [HasAVX],
                   !if(!eq(OpPrefix.Value, XS.Value), [UseSSE1],
                   [UseSSE2])));

  // AVX instructions have a 'v' prefix in the mnemonic
  let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm),
                  !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm),
                  asm));
}
|
|
|
|
|
|
|
|
// PI - SSE 1 & 2 packed instructions
class PI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
         InstrItinClass itin, Domain d>
  : I<o, F, outs, ins, asm, pattern, itin, d> {
  let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
                   !if(!eq(OpEnc.Value, EncVEX.Value), [HasAVX],
                   !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2],
                   [UseSSE1])));

  // AVX instructions have a 'v' prefix in the mnemonic
  let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm),
                  !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm),
                  asm));
}
|
|
|
|
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
// MMXPI - SSE 1 & 2 packed instructions with MMX operands. These are not
// inherited by AVX, so they keep the plain HasSSEx predicates (no VEX/EVEX
// handling and no 'v'-prefix rewriting).
class MMXPI<bits<8> o, Format F, dag outs, dag ins, string asm, list<dag> pattern,
            Domain d>
      : I<o, F, outs, ins, asm, pattern, NoItinerary, d> {
  let Predicates = !if(!eq(OpPrefix.Value, PD.Value), [HasSSE2],
                   [HasSSE1]);
}
|
|
|
|
|
2010-06-23 07:37:59 +08:00
|
|
|
// PIi8 - SSE 1 & 2 packed instructions with an 8-bit immediate.
// Predicate/mnemonic selection mirrors PI.
class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin, Domain d>
      : Ii8<o, F, outs, ins, asm, pattern, itin, d> {
  let Predicates = !if(!eq(OpEnc.Value, EncEVEX.Value), [HasAVX512],
                   !if(!eq(OpEnc.Value, EncVEX.Value), [HasAVX],
                   !if(!eq(OpPrefix.Value, PD.Value), [UseSSE2],
                   [UseSSE1])));

  // AVX instructions have a 'v' prefix in the mnemonic
  let AsmString = !if(!eq(OpEnc.Value, EncEVEX.Value), !strconcat("v", asm),
                  !if(!eq(OpEnc.Value, EncVEX.Value), !strconcat("v", asm),
                  asm));
}
|
|
|
|
|
2007-07-31 16:04:03 +08:00
|
|
|
// SSE1 Instruction Templates:
//
//   SSI   - SSE1 instructions with XS prefix.
//   PSI   - SSE1 instructions with PS prefix.
//   PSIi8 - SSE1 instructions with ImmT == Imm8 and PS prefix.
//   VSSI  - SSE1 instructions with XS prefix in AVX form.
//   VPSI  - SSE1 instructions with PS prefix in AVX form, packed single.
2012-02-02 07:20:51 +08:00
|
|
|
// SSI - SSE1 scalar instruction with XS prefix, legacy encoding only.
class SSI<bits<8> o, Format F, dag outs, dag ins, string asm,
          list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE1]>;
|
2010-03-26 01:25:00 +08:00
|
|
|
// SSIi8 - SSE1 instruction with an 8-bit immediate and XS prefix.
class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern, InstrItinClass itin = NoItinerary>
      : Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE1]>;
|
2012-02-02 07:20:51 +08:00
|
|
|
// PSI - SSE1 packed instruction with PS prefix, packed-single domain.
class PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
          list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, PS,
        Requires<[UseSSE1]>;
|
2007-07-31 16:04:03 +08:00
|
|
|
// PSIi8 - SSE1 instruction with an 8-bit immediate, PS prefix,
// packed-single domain.
class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern, InstrItinClass itin = NoItinerary>
      : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, PS,
        Requires<[UseSSE1]>;
|
2010-06-09 06:51:23 +08:00
|
|
|
// VSSI - SSE1 instruction with XS prefix in AVX form ('v'-prefixed mnemonic).
class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XS,
        Requires<[HasAVX]>;
|
2010-06-12 09:23:26 +08:00
|
|
|
// VPSI - SSE1 instruction with PS prefix in AVX form, packed-single domain.
class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedSingle>, PS,
        Requires<[HasAVX]>;
|
2007-07-31 16:04:03 +08:00
|
|
|
|
|
|
|
// SSE2 Instruction Templates:
//
//   SDI    - SSE2 instructions with XD prefix.
//   SDIi8  - SSE2 instructions with ImmT == Imm8 and XD prefix.
//   S2SI   - SSE2 instructions with XS prefix.
//   S2SIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix.
//   PDI    - SSE2 instructions with PD prefix, packed double domain.
//   PDIi8  - SSE2 instructions with ImmT == Imm8 and PD prefix.
//   VSDI   - SSE2 scalar instructions with XD prefix in AVX form.
//   VPDI   - SSE2 vector instructions with PD prefix in AVX form,
//            packed double domain.
//   VS2I   - SSE2 scalar instructions with PD prefix in AVX form.
//   S2I    - SSE2 scalar instructions with PD prefix.
//   MMXSDIi8  - SSE2 instructions with ImmT == Imm8 and XD prefix as well as
//               MMX operands.
//   MMXS2SIi8 - SSE2 instructions with ImmT == Imm8 and XS prefix as well as
//               MMX operands.
|
|
|
|
2012-02-02 07:20:51 +08:00
|
|
|
// SDI - SSE2 scalar instruction with XD prefix, legacy encoding only.
class SDI<bits<8> o, Format F, dag outs, dag ins, string asm,
          list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[UseSSE2]>;
|
2007-12-21 03:57:09 +08:00
|
|
|
// SDIi8 - SSE2 instruction with an 8-bit immediate and XD prefix.
class SDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern, InstrItinClass itin = NoItinerary>
      : Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[UseSSE2]>;
|
2012-07-30 10:14:02 +08:00
|
|
|
// S2SI - SSE2 instruction with XS prefix.
class S2SI<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE2]>;
|
2012-07-30 10:14:02 +08:00
|
|
|
// S2SIi8 - SSE2 instruction with an 8-bit immediate and XS prefix.
// NOTE(review): `itin` is accepted but not forwarded to Ii8 here (Ii8's
// default itinerary is used) — preserved as-is since instantiations may
// rely on that.
class S2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
             list<dag> pattern, InstrItinClass itin = NoItinerary>
      : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[UseSSE2]>;
|
2012-02-02 07:20:51 +08:00
|
|
|
// PDI - SSE2 packed instruction with PD prefix, packed-double domain.
class PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
          list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, PD,
        Requires<[UseSSE2]>;
|
2007-07-31 16:04:03 +08:00
|
|
|
// PDIi8 - SSE2 instruction with an 8-bit immediate, PD prefix,
// packed-double domain.
class PDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern, InstrItinClass itin = NoItinerary>
      : Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, PD,
        Requires<[UseSSE2]>;
|
2010-06-09 06:51:23 +08:00
|
|
|
// VSDI - SSE2 scalar instruction with XD prefix in AVX form.
class VSDI<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XD,
        Requires<[UseAVX]>;
|
2012-07-30 10:14:02 +08:00
|
|
|
// VS2SI - SSE2 instruction with XS prefix in AVX form.
class VS2SI<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, XS,
        Requires<[HasAVX]>;
|
2010-06-12 09:23:26 +08:00
|
|
|
// VPDI - SSE2 vector instruction with PD prefix in AVX form,
// packed-double domain.
class VPDI<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedDouble>,
        PD, Requires<[HasAVX]>;
|
2013-06-09 15:37:10 +08:00
|
|
|
// VS2I - SSE2 scalar instruction with PD prefix in AVX form.
class VS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, !strconcat("v", asm), pattern, itin>, PD,
        Requires<[UseAVX]>;
|
2013-06-09 15:37:10 +08:00
|
|
|
// S2I - SSE2 scalar instruction with PD prefix, legacy encoding only.
class S2I<bits<8> o, Format F, dag outs, dag ins, string asm,
          list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, asm, pattern, itin>, PD, Requires<[UseSSE2]>;
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
// MMXSDIi8 - SSE2 instruction with an 8-bit immediate and XD prefix as well
// as MMX operands; not inherited by AVX, so it keeps the HasSSE2 predicate.
class MMXSDIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
               list<dag> pattern>
      : Ii8<o, F, outs, ins, asm, pattern>, XD, Requires<[HasSSE2]>;
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
// MMXS2SIi8 - SSE2 instruction with an 8-bit immediate and XS prefix as well
// as MMX operands; not inherited by AVX, so it keeps the HasSSE2 predicate.
class MMXS2SIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
                list<dag> pattern>
      : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasSSE2]>;
|
2007-07-31 16:04:03 +08:00
|
|
|
|
|
|
|
// SSE3 Instruction Templates:
//
//   S3I   - SSE3 instructions with PD prefixes.
//   S3SI  - SSE3 instructions with XS prefix.
//   S3DI  - SSE3 instructions with XD prefix.
2014-12-04 13:20:33 +08:00
|
|
|
// S3SI - SSE3 instruction with XS prefix, packed-single domain.
class S3SI<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
      : I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, XS,
        Requires<[UseSSE3]>;
|
2014-12-04 13:20:33 +08:00
|
|
|
class S3DI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2012-02-02 07:20:51 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, XD,
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
Requires<[UseSSE3]>;
|
2012-02-02 07:20:51 +08:00
|
|
|
class S3I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, PD,
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
Requires<[UseSSE3]>;
|
2007-07-31 16:04:03 +08:00
|
|
|
|
|
|
|
|
2008-02-13 06:51:28 +08:00
|
|
|
// SSSE3 Instruction Templates:
|
2014-12-04 13:20:33 +08:00
|
|
|
//
|
2008-02-13 06:51:28 +08:00
|
|
|
// SS38I - SSSE3 instructions with T8 prefix.
|
|
|
|
// SS3AI - SSSE3 instructions with TA prefix.
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
// MMXSS38I - SSSE3 instructions with T8 prefix and MMX operands.
|
|
|
|
// MMXSS3AI - SSSE3 instructions with TA prefix and MMX operands.
|
2008-02-13 06:51:28 +08:00
|
|
|
//
|
|
|
|
// Note: SSSE3 instructions have 64-bit and 128-bit versions. The 64-bit version
|
2012-01-09 08:11:29 +08:00
|
|
|
// uses the MMX registers. The 64-bit versions are grouped with the MMX
|
|
|
|
// classes. They need to be enabled even if AVX is enabled.
|
2008-02-13 06:51:28 +08:00
|
|
|
|
|
|
|
class SS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
Requires<[UseSSSE3]>;
|
2008-02-13 06:51:28 +08:00
|
|
|
class SS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
Requires<[UseSSSE3]>;
|
|
|
|
class MMXSS38I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 18:49:57 +08:00
|
|
|
list<dag> pattern>
|
|
|
|
: I<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedInt>, T8PS,
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
Requires<[HasSSSE3]>;
|
|
|
|
class MMXSS3AI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 18:49:57 +08:00
|
|
|
list<dag> pattern>
|
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedInt>, TAPS,
|
2010-03-26 01:25:00 +08:00
|
|
|
Requires<[HasSSSE3]>;
|
2008-02-13 06:51:28 +08:00
|
|
|
|
|
|
|
// SSE4.1 Instruction Templates:
|
2014-12-04 13:20:33 +08:00
|
|
|
//
|
2008-02-13 06:51:28 +08:00
|
|
|
// SS48I - SSE 4.1 instructions with T8 prefix.
|
2008-03-14 15:39:27 +08:00
|
|
|
// SS41AIi8 - SSE 4.1 instructions with TA prefix and ImmT == Imm8.
|
2008-02-13 06:51:28 +08:00
|
|
|
//
|
|
|
|
class SS48I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
Requires<[UseSSE41]>;
|
2008-03-14 15:39:27 +08:00
|
|
|
class SS4AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
Requires<[UseSSE41]>;
|
2008-02-13 06:51:28 +08:00
|
|
|
|
2008-07-18 00:51:19 +08:00
|
|
|
// SSE4.2 Instruction Templates:
|
2014-12-04 13:20:33 +08:00
|
|
|
//
|
2008-07-18 00:51:19 +08:00
|
|
|
// SS428I - SSE 4.2 instructions with T8 prefix.
|
|
|
|
class SS428I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
Requires<[UseSSE42]>;
|
2008-02-13 06:51:28 +08:00
|
|
|
|
2011-10-17 00:50:08 +08:00
|
|
|
// SS42FI - SSE 4.2 instructions with T8XD prefix.
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
// NOTE: 'HasSSE42' is used as SS42FI is only used for CRC32 insns.
|
2009-08-09 05:55:08 +08:00
|
|
|
class SS42FI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 22:31:42 +08:00
|
|
|
list<dag> pattern>
|
|
|
|
: I<o, F, outs, ins, asm, pattern>, T8XD, Requires<[HasSSE42]>;
|
2012-01-02 03:51:58 +08:00
|
|
|
|
2009-08-19 06:50:32 +08:00
|
|
|
// SS42AI = SSE 4.2 instructions with TA prefix
|
|
|
|
class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 22:31:42 +08:00
|
|
|
list<dag> pattern>
|
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedInt>, TAPD,
|
Introduce 'UseSSEx' to force SSE legacy encoding
- Add 'UseSSEx' to force SSE legacy insn not being selected when AVX is
enabled.
As the penalty of inter-mixing SSE and AVX instructions, we need
prevent SSE legacy insn from being generated except explicitly
specified through some intrinsics. For patterns supported by both
SSE and AVX, so far, we force AVX insn will be tried first relying on
AddedComplexity or position in td file. It's error-prone and
introduces bugs accidentally.
'UseSSEx' is disabled when AVX is turned on. For SSE insns inherited
by AVX, we need this predicate to force VEX encoding or SSE legacy
encoding only.
For insns not inherited by AVX, we still use the previous predicates,
i.e. 'HasSSEx'. So far, these insns fall into the following
categories:
* SSE insns with MMX operands
* SSE insns with GPR/MEM operands only (xFENCE, PREFETCH, CLFLUSH,
CRC, and etc.)
* SSE4A insns.
* MMX insns.
* x87 insns added by SSE.
2 test cases are modified:
- test/CodeGen/X86/fast-isel-x86-64.ll
AVX code generation is different from SSE one. 'vcvtsi2sdq' cannot be
selected by fast-isel due to complicated pattern and fast-isel
fallback to materialize it from constant pool.
- test/CodeGen/X86/widen_load-1.ll
AVX code generation is different from SSE one after fixing SSE/AVX
inter-mixing. Exec-domain fixing prefers 'vmovapd' instead of
'vmovaps'.
llvm-svn: 162919
2012-08-31 00:54:46 +08:00
|
|
|
Requires<[UseSSE42]>;
|
2009-08-19 06:50:32 +08:00
|
|
|
|
2010-07-20 08:11:13 +08:00
|
|
|
// AVX Instruction Templates:
|
|
|
|
// Instructions introduced in AVX (no SSE equivalent forms)
|
|
|
|
//
|
2014-01-14 15:41:20 +08:00
|
|
|
// AVX8I - AVX instructions with T8PD prefix.
|
|
|
|
// AVXAIi8 - AVX instructions with TAPD prefix and ImmT = Imm8.
|
2010-07-20 08:11:13 +08:00
|
|
|
class AVX8I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
|
2010-07-20 08:11:13 +08:00
|
|
|
Requires<[HasAVX]>;
|
2010-07-21 03:44:51 +08:00
|
|
|
class AVXAIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
|
2010-07-21 03:44:51 +08:00
|
|
|
Requires<[HasAVX]>;
|
2010-07-20 08:11:13 +08:00
|
|
|
|
2011-11-06 14:12:20 +08:00
|
|
|
// AVX2 Instruction Templates:
|
|
|
|
// Instructions introduced in AVX2 (no SSE equivalent forms)
|
|
|
|
//
|
2014-01-14 15:41:20 +08:00
|
|
|
// AVX28I - AVX2 instructions with T8PD prefix.
|
|
|
|
// AVX2AIi8 - AVX2 instructions with TAPD prefix and ImmT = Imm8.
|
2011-11-06 14:12:20 +08:00
|
|
|
class AVX28I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
|
2011-11-06 14:12:20 +08:00
|
|
|
Requires<[HasAVX2]>;
|
2011-11-07 07:04:08 +08:00
|
|
|
class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-03-26 07:12:41 +08:00
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
|
2011-11-06 14:12:20 +08:00
|
|
|
Requires<[HasAVX2]>;
|
|
|
|
|
2013-07-28 16:28:38 +08:00
|
|
|
|
|
|
|
// AVX-512 Instruction Templates:
|
|
|
|
// Instructions introduced in AVX-512 (no SSE equivalent forms)
|
|
|
|
//
|
2014-01-14 15:41:20 +08:00
|
|
|
// AVX5128I - AVX-512 instructions with T8PD prefix.
|
|
|
|
// AVX512AIi8 - AVX-512 instructions with TAPD prefix and ImmT = Imm8.
|
|
|
|
// AVX512PDI - AVX-512 instructions with PD, double packed.
|
2014-02-18 08:21:49 +08:00
|
|
|
// AVX512PSI - AVX-512 instructions with PS, single packed.
|
2013-07-28 16:28:38 +08:00
|
|
|
// AVX512XS8I - AVX-512 instructions with T8 and XS prefixes.
|
|
|
|
// AVX512XSI - AVX-512 instructions with XS prefix, generic domain.
|
2014-01-14 15:41:20 +08:00
|
|
|
// AVX512BI - AVX-512 instructions with PD, int packed domain.
|
|
|
|
// AVX512SI - AVX-512 scalar instructions with PD prefix.
|
2013-07-28 16:28:38 +08:00
|
|
|
|
|
|
|
class AVX5128I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8PD,
|
2013-07-28 16:28:38 +08:00
|
|
|
Requires<[HasAVX512]>;
|
2014-12-12 01:13:05 +08:00
|
|
|
class AVX5128IBase : T8PD {
|
|
|
|
Domain ExeDomain = SSEPackedInt;
|
|
|
|
}
|
2013-07-28 16:28:38 +08:00
|
|
|
class AVX512XS8I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, T8XS,
|
|
|
|
Requires<[HasAVX512]>;
|
|
|
|
class AVX512XSI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
|
|
|
: I<o, F, outs, ins, asm, pattern, itin>, XS,
|
|
|
|
Requires<[HasAVX512]>;
|
|
|
|
class AVX512XDI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, XD,
|
|
|
|
Requires<[HasAVX512]>;
|
|
|
|
class AVX512BI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, PD,
|
2013-07-28 16:28:38 +08:00
|
|
|
Requires<[HasAVX512]>;
|
2014-10-08 22:37:45 +08:00
|
|
|
class AVX512BIBase : PD {
|
|
|
|
Domain ExeDomain = SSEPackedInt;
|
|
|
|
}
|
2013-07-28 16:28:38 +08:00
|
|
|
class AVX512BIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, PD,
|
2013-07-28 16:28:38 +08:00
|
|
|
Requires<[HasAVX512]>;
|
2014-11-14 23:43:00 +08:00
|
|
|
class AVX512BIi8Base : PD {
|
|
|
|
Domain ExeDomain = SSEPackedInt;
|
|
|
|
ImmType ImmT = Imm8;
|
|
|
|
}
|
2015-06-01 15:17:23 +08:00
|
|
|
class AVX512XSIi8Base : XS {
|
|
|
|
Domain ExeDomain = SSEPackedInt;
|
|
|
|
ImmType ImmT = Imm8;
|
|
|
|
}
|
|
|
|
class AVX512XDIi8Base : XD {
|
|
|
|
Domain ExeDomain = SSEPackedInt;
|
|
|
|
ImmType ImmT = Imm8;
|
|
|
|
}
|
2015-05-07 19:24:42 +08:00
|
|
|
class AVX512PSIi8Base : PS {
|
|
|
|
Domain ExeDomain = SSEPackedSingle;
|
|
|
|
ImmType ImmT = Imm8;
|
|
|
|
}
|
|
|
|
class AVX512PDIi8Base : PD {
|
|
|
|
Domain ExeDomain = SSEPackedDouble;
|
|
|
|
ImmType ImmT = Imm8;
|
|
|
|
}
|
2013-07-28 16:28:38 +08:00
|
|
|
class AVX512AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>, TAPD,
|
2013-07-28 16:28:38 +08:00
|
|
|
Requires<[HasAVX512]>;
|
2014-08-15 01:13:19 +08:00
|
|
|
class AVX512AIi8Base : TAPD {
|
2014-08-08 01:53:55 +08:00
|
|
|
ImmType ImmT = Imm8;
|
|
|
|
}
|
2013-07-28 16:28:38 +08:00
|
|
|
class AVX512Ii8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-02-01 16:17:56 +08:00
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedInt>,
|
2014-01-14 15:41:20 +08:00
|
|
|
Requires<[HasAVX512]>;
|
2013-07-28 16:28:38 +08:00
|
|
|
class AVX512PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedDouble>, PD,
|
|
|
|
Requires<[HasAVX512]>;
|
2013-07-28 16:28:38 +08:00
|
|
|
class AVX512PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, InstrItinClass itin = NoItinerary>
|
2014-02-18 08:21:49 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, PS,
|
2013-07-28 16:28:38 +08:00
|
|
|
Requires<[HasAVX512]>;
|
|
|
|
class AVX512PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary>
|
2014-02-01 16:17:56 +08:00
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, itin, d>, Requires<[HasAVX512]>;
|
2013-07-28 16:28:38 +08:00
|
|
|
class AVX512PI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary>
|
2014-02-01 16:17:56 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin, d>, Requires<[HasAVX512]>;
|
2017-11-09 16:26:26 +08:00
|
|
|
class AVX512FMA3S<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2013-07-28 16:28:38 +08:00
|
|
|
list<dag>pattern, InstrItinClass itin = NoItinerary>
|
2014-01-14 15:41:20 +08:00
|
|
|
: I<o, F, outs, ins, asm, pattern, itin>, T8PD,
|
|
|
|
EVEX_4V, Requires<[HasAVX512]>;
|
2014-08-15 01:13:19 +08:00
|
|
|
class AVX512FMA3Base : T8PD, EVEX_4V;
|
2013-07-28 16:28:38 +08:00
|
|
|
|
2014-08-08 01:53:55 +08:00
|
|
|
class AVX512<bits<8> o, Format F, dag outs, dag ins, string asm,
|
|
|
|
list<dag>pattern, InstrItinClass itin = NoItinerary>
|
|
|
|
: I<o, F, outs, ins, asm, pattern, itin>, Requires<[HasAVX512]>;
|
|
|
|
|
2010-04-03 05:54:27 +08:00
|
|
|
// AES Instruction Templates:
|
|
|
|
//
|
|
|
|
// AES8I
|
2010-04-06 05:14:32 +08:00
|
|
|
// These use the same encoding as the SSE4.2 T8 and TA encodings.
|
2010-04-03 05:54:27 +08:00
|
|
|
class AES8I<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 22:31:42 +08:00
|
|
|
list<dag>pattern>
|
|
|
|
: I<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedInt>, T8PD,
|
2017-09-17 07:18:48 +08:00
|
|
|
Requires<[NoAVX, HasAES]>;
|
2010-04-03 05:54:27 +08:00
|
|
|
|
|
|
|
class AESAI<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 22:31:42 +08:00
|
|
|
list<dag> pattern>
|
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedInt>, TAPD,
|
2017-09-17 07:18:48 +08:00
|
|
|
Requires<[NoAVX, HasAES]>;
|
2010-04-03 05:54:27 +08:00
|
|
|
|
2012-05-31 22:34:17 +08:00
|
|
|
// PCLMUL Instruction Templates
|
|
|
|
class PCLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 22:31:42 +08:00
|
|
|
list<dag>pattern>
|
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedInt>, TAPD;
|
2010-07-24 02:41:12 +08:00
|
|
|
|
2010-07-23 08:54:35 +08:00
|
|
|
// FMA3 Instruction Templates
|
|
|
|
class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 07:24:38 +08:00
|
|
|
list<dag>pattern>
|
|
|
|
: I<o, F, outs, ins, asm, pattern>, T8PD,
|
2017-11-26 02:32:43 +08:00
|
|
|
VEX_4V, FMASC, Requires<[HasFMA, NoFMA4, NoVLX]>;
|
2017-11-09 16:26:26 +08:00
|
|
|
class FMA3S<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 07:24:38 +08:00
|
|
|
list<dag>pattern>
|
|
|
|
: I<o, F, outs, ins, asm, pattern>, T8PD,
|
2017-11-26 02:32:43 +08:00
|
|
|
VEX_4V, FMASC, Requires<[HasFMA, NoFMA4, NoAVX512]>;
|
|
|
|
class FMA3S_Int<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 07:24:38 +08:00
|
|
|
list<dag>pattern>
|
|
|
|
: I<o, F, outs, ins, asm, pattern>, T8PD,
|
2017-11-09 16:26:26 +08:00
|
|
|
VEX_4V, FMASC, Requires<[HasFMA, NoAVX512]>;
|
2010-07-23 08:54:35 +08:00
|
|
|
|
2011-11-26 03:33:42 +08:00
|
|
|
// FMA4 Instruction Templates
|
|
|
|
class FMA4<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 07:24:38 +08:00
|
|
|
list<dag>pattern>
|
|
|
|
: Ii8Reg<o, F, outs, ins, asm, pattern>, TAPD,
|
2017-11-09 16:26:26 +08:00
|
|
|
VEX_4V, FMASC, Requires<[HasFMA4, NoVLX]>;
|
|
|
|
class FMA4S<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 07:24:38 +08:00
|
|
|
list<dag>pattern>
|
|
|
|
: Ii8Reg<o, F, outs, ins, asm, pattern>, TAPD,
|
2017-11-09 16:26:26 +08:00
|
|
|
VEX_4V, FMASC, Requires<[HasFMA4, NoAVX512]>;
|
2017-11-26 02:32:43 +08:00
|
|
|
class FMA4S_Int<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 07:24:38 +08:00
|
|
|
list<dag>pattern>
|
|
|
|
: Ii8Reg<o, F, outs, ins, asm, pattern>, TAPD,
|
2017-11-26 02:32:43 +08:00
|
|
|
VEX_4V, FMASC, Requires<[HasFMA4]>;
|
2011-11-26 03:33:42 +08:00
|
|
|
|
2011-12-13 03:37:49 +08:00
|
|
|
// XOP 2, 3 and 4 Operand Instruction Template
|
|
|
|
class IXOP<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 07:24:38 +08:00
|
|
|
list<dag> pattern>
|
|
|
|
: I<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedDouble>,
|
2014-02-02 15:08:01 +08:00
|
|
|
XOP9, Requires<[HasXOP]>;
|
2011-12-13 03:37:49 +08:00
|
|
|
|
2016-08-22 09:37:19 +08:00
|
|
|
// XOP 2 and 3 Operand Instruction Templates with imm byte
|
2011-12-13 03:37:49 +08:00
|
|
|
class IXOPi8<bits<8> o, Format F, dag outs, dag ins, string asm,
|
2018-04-12 07:24:38 +08:00
|
|
|
list<dag> pattern>
|
|
|
|
: Ii8<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedDouble>,
|
2014-02-02 15:08:01 +08:00
|
|
|
XOP8, Requires<[HasXOP]>;
|
2016-08-22 09:37:19 +08:00
|
|
|
// XOP 4 Operand Instruction Templates with imm byte
|
|
|
|
// IXOPi8Reg - XOP 4-operand template: Ii8Reg base (a register is encoded in
// the imm8 byte) in the XOP8 opcode map, SSEPackedDouble domain, XOP-only.
class IXOPi8Reg<bits<8> o, Format F, dag outs, dag ins, string asm,
                list<dag> pattern>
  : Ii8Reg<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedDouble>,
    XOP8, Requires<[HasXOP]>;
|
2011-12-13 03:37:49 +08:00
|
|
|
|
|
|
|
// XOP 5 operand instruction (VEX encoding!)
|
|
|
|
// IXOP5 - XOP 5-operand template. Note it uses the ordinary VEX encoding
// (TAPD + VEX.vvvv), not an XOP opcode map, with SSEPackedInt domain.
class IXOP5<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag>pattern>
  : Ii8Reg<o, F, outs, ins, asm, pattern, NoItinerary, SSEPackedInt>, TAPD,
    VEX_4V, Requires<[HasXOP]>;
|
2011-12-13 03:37:49 +08:00
|
|
|
|
2007-07-31 16:04:03 +08:00
|
|
|
// X86-64 Instruction templates...
|
|
|
|
//
|
|
|
|
|
2012-02-02 07:20:51 +08:00
|
|
|
// RI - basic instruction template I with the REX.W prefix added
// (64-bit operand size).
class RI<bits<8> o, Format F, dag outs, dag ins, string asm,
         list<dag> pattern, InstrItinClass itin = NoItinerary>
  : I<o, F, outs, ins, asm, pattern, itin>, REX_W;
|
2007-07-31 16:04:03 +08:00
|
|
|
// RIi8 - Ii8 (imm8 operand) with the REX.W prefix added.
class RIi8 <bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern, InstrItinClass itin = NoItinerary>
  : Ii8<o, F, outs, ins, asm, pattern, itin>, REX_W;
|
2014-01-13 22:05:59 +08:00
|
|
|
// RIi16 - Ii16 (imm16 operand) with the REX.W prefix added.
class RIi16 <bits<8> o, Format F, dag outs, dag ins, string asm,
             list<dag> pattern, InstrItinClass itin = NoItinerary>
  : Ii16<o, F, outs, ins, asm, pattern, itin>, REX_W;
|
2007-07-31 16:04:03 +08:00
|
|
|
// RIi32 - Ii32 (imm32 operand) with the REX.W prefix added.
class RIi32 <bits<8> o, Format F, dag outs, dag ins, string asm,
             list<dag> pattern, InstrItinClass itin = NoItinerary>
  : Ii32<o, F, outs, ins, asm, pattern, itin>, REX_W;
|
2014-01-31 06:20:41 +08:00
|
|
|
// RIi32S - Ii32S (sign-extended imm32 operand) with the REX.W prefix added.
class RIi32S <bits<8> o, Format F, dag outs, dag ins, string asm,
              list<dag> pattern, InstrItinClass itin = NoItinerary>
  : Ii32S<o, F, outs, ins, asm, pattern, itin>, REX_W;
|
2018-03-29 11:14:57 +08:00
|
|
|
// RIi64 - Ii64 (imm64 operand) with the REX.W prefix added.
class RIi64<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern, InstrItinClass itin = NoItinerary>
  : Ii64<o, F, outs, ins, asm, pattern, itin>, REX_W;
|
2013-07-23 05:25:31 +08:00
|
|
|
|
2013-06-09 15:37:10 +08:00
|
|
|
// RS2I - S2I (SSE2 base, defined elsewhere in this file) with the REX.W
// prefix added.
class RS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern, InstrItinClass itin = NoItinerary>
  : S2I<o, F, outs, ins, asm, pattern, itin>, REX_W;
|
|
|
|
// VRS2I - VS2I (VEX-encoded SSE2 base) with VEX.W set; the VEX counterpart
// of RS2I above.
class VRS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern, InstrItinClass itin = NoItinerary>
  : VS2I<o, F, outs, ins, asm, pattern, itin>, VEX_W;
|
2007-07-31 16:04:03 +08:00
|
|
|
|
|
|
|
// MMX Instruction templates
|
|
|
|
//
|
|
|
|
|
|
|
|
// MMXI - MMX instructions with TB prefix.
|
2013-10-09 10:18:34 +08:00
|
|
|
// MMXI32 - MMX instructions with TB prefix valid only in 32 bit mode.
|
2008-08-23 23:53:19 +08:00
|
|
|
// MMXI64 - MMX instructions with TB prefix valid only in 64 bit mode.
|
2014-01-14 15:41:20 +08:00
|
|
|
// MMX2I - MMX / SSE2 instructions with PD prefix.
|
2014-02-18 08:21:49 +08:00
|
|
|
// MMXRI  - MMX instructions with TB prefix and REX.W.
|
|
|
|
// MMXIi8 - MMX instructions with ImmT == Imm8 and PS prefix.
|
2007-07-31 16:04:03 +08:00
|
|
|
// MMXID - MMX instructions with XD prefix.
|
|
|
|
// MMXIS - MMX instructions with XS prefix.
|
2014-12-04 13:20:33 +08:00
|
|
|
// MMXI - MMX instruction with the PS (TB) prefix; requires MMX.
class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm,
           list<dag> pattern>
  : I<o, F, outs, ins, asm, pattern>, PS, Requires<[HasMMX]>;
|
2014-12-04 13:20:33 +08:00
|
|
|
// MMXI32 - like MMXI, but only valid outside 64-bit mode.
class MMXI32<bits<8> o, Format F, dag outs, dag ins, string asm,
             list<dag> pattern>
  : I<o, F, outs, ins, asm, pattern>, PS, Requires<[HasMMX,Not64BitMode]>;
|
2014-12-04 13:20:33 +08:00
|
|
|
// MMXI64 - like MMXI, but only valid in 64-bit mode.
class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm,
             list<dag> pattern>
  : I<o, F, outs, ins, asm, pattern>, PS, Requires<[HasMMX,In64BitMode]>;
|
2014-12-04 13:20:33 +08:00
|
|
|
// MMXRI - MMXI with the REX.W prefix added (64-bit GPR operand forms).
class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern>
  : I<o, F, outs, ins, asm, pattern>, PS, REX_W, Requires<[HasMMX]>;
|
2014-12-04 13:20:33 +08:00
|
|
|
// MMX2I - MMX / SSE2 instruction with the PD (66h) prefix; requires MMX.
class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern>
  : I<o, F, outs, ins, asm, pattern>, PD, Requires<[HasMMX]>;
|
2014-12-04 13:20:33 +08:00
|
|
|
// MMXIi8 - MMX instruction with an imm8 operand (Ii8 base) and PS prefix.
class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
             list<dag> pattern>
  : Ii8<o, F, outs, ins, asm, pattern>, PS, Requires<[HasMMX]>;
|
2014-12-04 13:20:33 +08:00
|
|
|
// MMXID - MMX instruction with an imm8 operand and the XD (F2h) prefix.
class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern>
  : Ii8<o, F, outs, ins, asm, pattern>, XD, Requires<[HasMMX]>;
|
2014-12-04 13:20:33 +08:00
|
|
|
// MMXIS - MMX instruction with an imm8 operand and the XS (F3h) prefix.
class MMXIS<bits<8> o, Format F, dag outs, dag ins, string asm,
            list<dag> pattern>
  : Ii8<o, F, outs, ins, asm, pattern>, XS, Requires<[HasMMX]>;
|