//===-- X86InstrFMA.td - FMA Instruction Set ---------------*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file describes FMA (Fused Multiply-Add) instructions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// FMA3 - Intel 3 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//

// For all FMA opcodes declared in the fma3p_rm_* and fma3s_rm_* multiclasses
// defined below, both the register and memory variants are commutable.
// For the register form the commutable operands are 1, 2 and 3.
// For the memory variant the folded operand must be in 3. Thus,
// in that case, only operands 1 and 2 can be swapped.
// Commuting some of the operands may require an opcode change:
//   FMA*213*:
//     operands 1 and 2 (memory & register forms): *213* --> *213* (no change);
//     operands 1 and 3 (register forms only):     *213* --> *231*;
//     operands 2 and 3 (register forms only):     *213* --> *132*.
//   FMA*132*:
//     operands 1 and 2 (memory & register forms): *132* --> *231*;
//     operands 1 and 3 (register forms only):     *132* --> *132* (no change);
//     operands 2 and 3 (register forms only):     *132* --> *213*.
//   FMA*231*:
//     operands 1 and 2 (memory & register forms): *231* --> *132*;
//     operands 1 and 3 (register forms only):     *231* --> *213*;
//     operands 2 and 3 (register forms only):     *231* --> *231* (no change).
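//
// As a concrete illustration of one row above (register forms; the
// multiplication order follows the patterns defined in this file, where
// FMA*213* dst(=op1), op2, op3 computes op2 * op1 + op3):
//     FMA*213* reg1, reg2, reg3;   // reg2 * reg1 + reg3
//     --> commute operands 1 and 3 -->
//     FMA*231* reg3, reg2, reg1;   // reg2 * reg1 + reg3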

multiclass fma3p_rm_213<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDNode Op> {
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op RC:$src2, RC:$src1, RC:$src3)))]>;

  let mayLoad = 1 in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op RC:$src2, RC:$src1,
                                      (MemFrag addr:$src3))))]>;
}

multiclass fma3p_rm_231<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDNode Op> {
  let hasSideEffects = 0 in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               []>;

  let mayLoad = 1 in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op RC:$src2, (MemFrag addr:$src3),
                                      RC:$src1)))]>;
}

multiclass fma3p_rm_132<bits<8> opc, string OpcodeStr, RegisterClass RC,
                        ValueType VT, X86MemOperand x86memop, PatFrag MemFrag,
                        SDNode Op> {
  let hasSideEffects = 0 in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               []>;

  // The pattern is in 312 order so that the load is in a different place from
  // the 213 and 231 patterns; this helps tablegen's duplicate pattern detection.
  let mayLoad = 1 in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (VT (Op (MemFrag addr:$src3), RC:$src1,
                                      RC:$src2)))]>;
}

let Constraints = "$src1 = $dst", hasSideEffects = 0, isCommutable = 1 in
multiclass fma3p_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpcodeStr, string PackTy, string Suff,
                       PatFrag MemFrag128, PatFrag MemFrag256,
                       SDNode Op, ValueType OpTy128, ValueType OpTy256> {
  defm NAME#213#Suff : fma3p_rm_213<opc213, !strconcat(OpcodeStr, "213", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op>;
  defm NAME#231#Suff : fma3p_rm_231<opc231, !strconcat(OpcodeStr, "231", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op>;
  defm NAME#132#Suff : fma3p_rm_132<opc132, !strconcat(OpcodeStr, "132", PackTy),
                                    VR128, OpTy128, f128mem, MemFrag128, Op>;

  defm NAME#213#Suff#Y : fma3p_rm_213<opc213, !strconcat(OpcodeStr, "213", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op>,
                                      VEX_L;
  defm NAME#231#Suff#Y : fma3p_rm_231<opc231, !strconcat(OpcodeStr, "231", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op>,
                                      VEX_L;
  defm NAME#132#Suff#Y : fma3p_rm_132<opc132, !strconcat(OpcodeStr, "132", PackTy),
                                      VR256, OpTy256, f256mem, MemFrag256, Op>,
                                      VEX_L;
}

// Fused Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFMADD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmadd, v4f32, v8f32>;
  defm VFMSUB    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmsub, v4f32, v8f32>;
  defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmaddsub, v4f32, v8f32>;
  defm VFMSUBADD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps", "PS",
                               loadv4f32, loadv8f32, X86Fmsubadd, v4f32, v8f32>;
}

let ExeDomain = SSEPackedDouble in {
  defm VFMADD    : fma3p_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmadd, v2f64,
                               v4f64>, VEX_W;
  defm VFMSUB    : fma3p_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmsub, v2f64,
                               v4f64>, VEX_W;
  defm VFMADDSUB : fma3p_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmaddsub,
                               v2f64, v4f64>, VEX_W;
  defm VFMSUBADD : fma3p_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd", "PD",
                               loadv2f64, loadv4f64, X86Fmsubadd,
                               v2f64, v4f64>, VEX_W;
}

// Fused Negative Multiply-Add
let ExeDomain = SSEPackedSingle in {
  defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps", "PS", loadv4f32,
                             loadv8f32, X86Fnmadd, v4f32, v8f32>;
  defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps", "PS", loadv4f32,
                             loadv8f32, X86Fnmsub, v4f32, v8f32>;
}

let ExeDomain = SSEPackedDouble in {
  defm VFNMADD : fma3p_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd", "PD", loadv2f64,
                             loadv4f64, X86Fnmadd, v2f64, v4f64>, VEX_W;
  defm VFNMSUB : fma3p_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd", "PD", loadv2f64,
                             loadv4f64, X86Fnmsub, v2f64, v4f64>, VEX_W;
}

// All source register operands of the FMA opcodes defined in the fma3s_rm_*
// multiclasses can be commuted. In many cases such a commute transformation
// requires an opcode adjustment; for example, commuting operands 1 and 2 in
// the FMA*132 form would require an opcode change to FMA*231:
//     FMA*132* reg1, reg2, reg3; // reg1 * reg3 + reg2;
//     -->
//     FMA*231* reg2, reg1, reg3; // reg1 * reg3 + reg2;
// Please see the more detailed comment at the very beginning of the section
// defining the FMA3 opcodes above.

multiclass fma3s_rm_213<bits<8> opc, string OpcodeStr,
                        X86MemOperand x86memop, RegisterClass RC,
                        SDPatternOperator OpNode> {
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst, (OpNode RC:$src2, RC:$src1, RC:$src3))]>;

  let mayLoad = 1 in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst,
                     (OpNode RC:$src2, RC:$src1, (load addr:$src3)))]>;
}

multiclass fma3s_rm_231<bits<8> opc, string OpcodeStr,
                        X86MemOperand x86memop, RegisterClass RC,
                        SDPatternOperator OpNode> {
  let hasSideEffects = 0 in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               []>;

  let mayLoad = 1 in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst,
                     (OpNode RC:$src2, (load addr:$src3), RC:$src1))]>;
}

multiclass fma3s_rm_132<bits<8> opc, string OpcodeStr,
                        X86MemOperand x86memop, RegisterClass RC,
                        SDPatternOperator OpNode> {
  let hasSideEffects = 0 in
  def r : FMA3<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               []>;

  // The pattern is in 312 order so that the load is in a different place from
  // the 213 and 231 patterns; this helps tablegen's duplicate pattern detection.
  let mayLoad = 1 in
  def m : FMA3<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, x86memop:$src3),
               !strconcat(OpcodeStr,
                          "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
               [(set RC:$dst,
                     (OpNode (load addr:$src3), RC:$src1, RC:$src2))]>;
}

let Constraints = "$src1 = $dst", isCommutable = 1, hasSideEffects = 0 in
multiclass fma3s_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                       string OpStr, string PackTy, string Suff,
                       SDNode OpNode, RegisterClass RC,
                       X86MemOperand x86memop> {
  let Predicates = [HasFMA, NoAVX512] in {
    defm NAME#213#Suff : fma3s_rm_213<opc213, !strconcat(OpStr, "213", PackTy),
                                      x86memop, RC, OpNode>;
    defm NAME#231#Suff : fma3s_rm_231<opc231, !strconcat(OpStr, "231", PackTy),
                                      x86memop, RC, OpNode>;
    defm NAME#132#Suff : fma3s_rm_132<opc132, !strconcat(OpStr, "132", PackTy),
                                      x86memop, RC, OpNode>;
  }
}

// These FMA*_Int instructions are defined specially for being used when
// the scalar FMA intrinsics are lowered to machine instructions, and in that
// sense they are similar to the existing ADD*_Int, SUB*_Int, MUL*_Int, etc.
// instructions.
//
// All of the FMA*_Int opcodes are defined as commutable here.
// Commuting the 2nd and 3rd source register operands of FMAs is quite trivial
// and the corresponding optimizations have been developed.
// Commuting the 1st operand of FMA*_Int requires some additional analysis:
// the commute optimization is legal only if all users of FMA*_Int use only
// the lowest element of the FMA*_Int instruction. Even though such analysis
// may not be implemented yet, we allow the routines doing the actual commute
// transformation to decide whether a particular instruction is commutable.
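//
// For example, following Intel's scalar FMA semantics, only the lowest element
// of an *_Int result comes from the multiply-add; the upper elements are passed
// through from the tied operand 1 (see the patterns at the end of the fma3s
// multiclass below):
//     VFMADD213SSr_Int xmm1, xmm2, xmm3
//       xmm1[31:0]   = xmm2[31:0] * xmm1[31:0] + xmm3[31:0]
//       xmm1[127:32] = xmm1[127:32]  // pass-through bits; commuting operand 1
//                                    // would change them.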
let Constraints = "$src1 = $dst", isCommutable = 1, isCodeGenOnly = 1,
    hasSideEffects = 0 in
multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr,
                        Operand memopr, RegisterClass RC> {
  def r_Int : FMA3<opc, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, RC:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   []>;

  let mayLoad = 1 in
  def m_Int : FMA3<opc, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, memopr:$src3),
                   !strconcat(OpcodeStr,
                              "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
                   []>;
}

// The FMA 213 form is created for lowering of scalar FMA intrinsics
// to machine instructions.
// The FMA 132 form can trivially be obtained by commuting the 2nd and 3rd
// operands of the FMA 213 form.
// The FMA 231 form can be obtained only by commuting the 1st operand of the
// 213 or 132 forms, which is possible only after special analysis of all uses
// of the initial instruction. Such analysis does not exist yet, so the 231
// form of the FMA*_Int instructions is introduced under the optimistic
// assumption that such analysis will be implemented eventually.
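//
// For example (register forms; operand numbering as in the comment at the top
// of the FMA3 section):
//     FMA*213* reg1, reg2, reg3;   // reg2 * reg1 + reg3
//     --> commute operands 2 and 3 -->
//     FMA*132* reg1, reg3, reg2;   // reg1 * reg2 + reg3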
multiclass fma3s_int_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                           string OpStr, string PackTy, string Suff,
                           RegisterClass RC, Operand memop> {
  defm NAME#132#Suff : fma3s_rm_int<opc132, !strconcat(OpStr, "132", PackTy),
                                    memop, RC>;
  defm NAME#213#Suff : fma3s_rm_int<opc213, !strconcat(OpStr, "213", PackTy),
                                    memop, RC>;
  defm NAME#231#Suff : fma3s_rm_int<opc231, !strconcat(OpStr, "231", PackTy),
                                    memop, RC>;
}

multiclass fma3s<bits<8> opc132, bits<8> opc213, bits<8> opc231,
                 string OpStr, SDNode OpNodeIntrin, SDNode OpNode> {
  let ExeDomain = SSEPackedSingle in
  defm NAME : fma3s_forms<opc132, opc213, opc231, OpStr, "ss", "SS", OpNode,
                          FR32, f32mem>,
              fma3s_int_forms<opc132, opc213, opc231, OpStr, "ss", "SS",
                              VR128, ssmem>;

  let ExeDomain = SSEPackedDouble in
  defm NAME : fma3s_forms<opc132, opc213, opc231, OpStr, "sd", "SD", OpNode,
                          FR64, f64mem>,
              fma3s_int_forms<opc132, opc213, opc231, OpStr, "sd", "SD",
                              VR128, sdmem>, VEX_W;

  // These patterns use the 123 ordering, instead of 213, even though
  // they match the intrinsic to the 213 version of the instruction.
  // This is because src1 is tied to dest, and the scalar intrinsics
  // require the pass-through values to come from the first source
  // operand, not the second.
  let Predicates = [HasFMA, NoAVX512] in {
    def : Pat<(v4f32 (OpNodeIntrin VR128:$src1, VR128:$src2, VR128:$src3)),
              (!cast<Instruction>(NAME#"213SSr_Int")
                VR128:$src1, VR128:$src2, VR128:$src3)>;

    def : Pat<(v2f64 (OpNodeIntrin VR128:$src1, VR128:$src2, VR128:$src3)),
              (!cast<Instruction>(NAME#"213SDr_Int")
                VR128:$src1, VR128:$src2, VR128:$src3)>;

    def : Pat<(v4f32 (OpNodeIntrin VR128:$src1, VR128:$src2,
                                   sse_load_f32:$src3)),
              (!cast<Instruction>(NAME#"213SSm_Int")
                VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;

    def : Pat<(v2f64 (OpNodeIntrin VR128:$src1, VR128:$src2,
                                   sse_load_f64:$src3)),
              (!cast<Instruction>(NAME#"213SDm_Int")
                VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;

    def : Pat<(v4f32 (OpNodeIntrin VR128:$src1, sse_load_f32:$src3,
                                   VR128:$src2)),
              (!cast<Instruction>(NAME#"132SSm_Int")
                VR128:$src1, VR128:$src2, sse_load_f32:$src3)>;

    def : Pat<(v2f64 (OpNodeIntrin VR128:$src1, sse_load_f64:$src3,
                                   VR128:$src2)),
              (!cast<Instruction>(NAME#"132SDm_Int")
                VR128:$src1, VR128:$src2, sse_load_f64:$src3)>;
  }
}

defm VFMADD : fma3s<0x99, 0xA9, 0xB9, "vfmadd", X86Fmadds1, X86Fmadd>, VEX_LIG;
defm VFMSUB : fma3s<0x9B, 0xAB, 0xBB, "vfmsub", X86Fmsubs1, X86Fmsub>, VEX_LIG;

defm VFNMADD : fma3s<0x9D, 0xAD, 0xBD, "vfnmadd", X86Fnmadds1, X86Fnmadd>,
               VEX_LIG;
defm VFNMSUB : fma3s<0x9F, 0xAF, 0xBF, "vfnmsub", X86Fnmsubs1, X86Fnmsub>,
               VEX_LIG;

//===----------------------------------------------------------------------===//
// FMA4 - AMD 4 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//

multiclass fma4s<bits<8> opc, string OpcodeStr, RegisterClass RC,
                 X86MemOperand x86memop, ValueType OpVT, SDNode OpNode,
                 PatFrag mem_frag> {
  let isCommutable = 1 in
  def rr : FMA4<opc, MRMSrcRegOp4, (outs RC:$dst),
           (ins RC:$src1, RC:$src2, RC:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst,
             (OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>, VEX_W, VEX_LIG;
  def rm : FMA4<opc, MRMSrcMemOp4, (outs RC:$dst),
           (ins RC:$src1, RC:$src2, x86memop:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst, (OpNode RC:$src1, RC:$src2,
                           (mem_frag addr:$src3)))]>, VEX_W, VEX_LIG;
  def mr : FMA4<opc, MRMSrcMem, (outs RC:$dst),
           (ins RC:$src1, x86memop:$src2, RC:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set RC:$dst,
             (OpNode RC:$src1, (mem_frag addr:$src2), RC:$src3))]>, VEX_LIG;
  // For disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in
  def rr_REV : FMA4<opc, MRMSrcReg, (outs RC:$dst),
               (ins RC:$src1, RC:$src2, RC:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
               VEX_LIG, FoldGenData<NAME#rr>;
}

multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
                     ValueType VT, ComplexPattern mem_cpat, SDNode OpNode> {
  let isCodeGenOnly = 1 in {
  def rr_Int : FMA4<opc, MRMSrcRegOp4, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set VR128:$dst,
                 (VT (OpNode VR128:$src1, VR128:$src2, VR128:$src3)))]>, VEX_W,
               VEX_LIG;
  def rm_Int : FMA4<opc, MRMSrcMemOp4, (outs VR128:$dst),
               (ins VR128:$src1, VR128:$src2, memop:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set VR128:$dst, (VT (OpNode VR128:$src1, VR128:$src2,
                                      mem_cpat:$src3)))]>, VEX_W, VEX_LIG;
  def mr_Int : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
               (ins VR128:$src1, memop:$src2, VR128:$src3),
               !strconcat(OpcodeStr,
               "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
               [(set VR128:$dst,
                 (VT (OpNode VR128:$src1, mem_cpat:$src2, VR128:$src3)))]>,
               VEX_LIG;
  let hasSideEffects = 0 in
  def rr_Int_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
                   (ins VR128:$src1, VR128:$src2, VR128:$src3),
                   !strconcat(OpcodeStr,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                   []>, VEX_LIG, FoldGenData<NAME#rr_Int>;
  } // isCodeGenOnly = 1
}

multiclass fma4p<bits<8> opc, string OpcodeStr, SDNode OpNode,
                 ValueType OpVT128, ValueType OpVT256,
                 PatFrag ld_frag128, PatFrag ld_frag256> {
  let isCommutable = 1 in
  def rr : FMA4<opc, MRMSrcRegOp4, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst,
             (OpVT128 (OpNode VR128:$src1, VR128:$src2, VR128:$src3)))]>,
           VEX_W;
  def rm : FMA4<opc, MRMSrcMemOp4, (outs VR128:$dst),
           (ins VR128:$src1, VR128:$src2, f128mem:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst, (OpNode VR128:$src1, VR128:$src2,
                              (ld_frag128 addr:$src3)))]>, VEX_W;
  def mr : FMA4<opc, MRMSrcMem, (outs VR128:$dst),
           (ins VR128:$src1, f128mem:$src2, VR128:$src3),
           !strconcat(OpcodeStr,
           "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
           [(set VR128:$dst,
             (OpNode VR128:$src1, (ld_frag128 addr:$src2), VR128:$src3))]>;
  let isCommutable = 1 in
  def Yrr : FMA4<opc, MRMSrcRegOp4, (outs VR256:$dst),
            (ins VR256:$src1, VR256:$src2, VR256:$src3),
            !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            [(set VR256:$dst,
              (OpVT256 (OpNode VR256:$src1, VR256:$src2, VR256:$src3)))]>,
            VEX_W, VEX_L;
  def Yrm : FMA4<opc, MRMSrcMemOp4, (outs VR256:$dst),
            (ins VR256:$src1, VR256:$src2, f256mem:$src3),
            !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            [(set VR256:$dst, (OpNode VR256:$src1, VR256:$src2,
                               (ld_frag256 addr:$src3)))]>, VEX_W, VEX_L;
  def Ymr : FMA4<opc, MRMSrcMem, (outs VR256:$dst),
            (ins VR256:$src1, f256mem:$src2, VR256:$src3),
            !strconcat(OpcodeStr,
            "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
            [(set VR256:$dst, (OpNode VR256:$src1,
                               (ld_frag256 addr:$src2), VR256:$src3))]>, VEX_L;
  // For disassembler
  let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
    def rr_REV : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
                 (ins VR128:$src1, VR128:$src2, VR128:$src3),
                 !strconcat(OpcodeStr,
                 "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                 FoldGenData<NAME#rr>;
    def Yrr_REV : FMA4<opc, MRMSrcReg, (outs VR256:$dst),
                  (ins VR256:$src1, VR256:$src2, VR256:$src3),
                  !strconcat(OpcodeStr,
                  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), []>,
                  VEX_L, FoldGenData<NAME#Yrr>;
  } // isCodeGenOnly = 1
}

let ExeDomain = SSEPackedSingle in {
  // Scalar Instructions
  defm VFMADDSS4 : fma4s<0x6A, "vfmaddss", FR32, f32mem, f32, X86Fmadd, loadf32>,
                   fma4s_int<0x6A, "vfmaddss", ssmem, v4f32, sse_load_f32,
                             X86Fmadds1>;
  defm VFMSUBSS4 : fma4s<0x6E, "vfmsubss", FR32, f32mem, f32, X86Fmsub, loadf32>,
                   fma4s_int<0x6E, "vfmsubss", ssmem, v4f32, sse_load_f32,
                             X86Fmsubs1>;
  defm VFNMADDSS4 : fma4s<0x7A, "vfnmaddss", FR32, f32mem, f32,
                          X86Fnmadd, loadf32>,
                    fma4s_int<0x7A, "vfnmaddss", ssmem, v4f32, sse_load_f32,
                              X86Fnmadds1>;
  defm VFNMSUBSS4 : fma4s<0x7E, "vfnmsubss", FR32, f32mem, f32,
                          X86Fnmsub, loadf32>,
                    fma4s_int<0x7E, "vfnmsubss", ssmem, v4f32, sse_load_f32,
                              X86Fnmsubs1>;
  // Packed Instructions
  defm VFMADDPS4 : fma4p<0x68, "vfmaddps", X86Fmadd, v4f32, v8f32,
                         loadv4f32, loadv8f32>;
  defm VFMSUBPS4 : fma4p<0x6C, "vfmsubps", X86Fmsub, v4f32, v8f32,
                         loadv4f32, loadv8f32>;
  defm VFNMADDPS4 : fma4p<0x78, "vfnmaddps", X86Fnmadd, v4f32, v8f32,
                          loadv4f32, loadv8f32>;
  defm VFNMSUBPS4 : fma4p<0x7C, "vfnmsubps", X86Fnmsub, v4f32, v8f32,
                          loadv4f32, loadv8f32>;
  defm VFMADDSUBPS4 : fma4p<0x5C, "vfmaddsubps", X86Fmaddsub, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
  defm VFMSUBADDPS4 : fma4p<0x5E, "vfmsubaddps", X86Fmsubadd, v4f32, v8f32,
                            loadv4f32, loadv8f32>;
}

let ExeDomain = SSEPackedDouble in {
  // Scalar Instructions
  defm VFMADDSD4 : fma4s<0x6B, "vfmaddsd", FR64, f64mem, f64, X86Fmadd, loadf64>,
                   fma4s_int<0x6B, "vfmaddsd", sdmem, v2f64, sse_load_f64,
                             X86Fmadds1>;
  defm VFMSUBSD4 : fma4s<0x6F, "vfmsubsd", FR64, f64mem, f64, X86Fmsub, loadf64>,
                   fma4s_int<0x6F, "vfmsubsd", sdmem, v2f64, sse_load_f64,
                             X86Fmsubs1>;
  defm VFNMADDSD4 : fma4s<0x7B, "vfnmaddsd", FR64, f64mem, f64,
                          X86Fnmadd, loadf64>,
                    fma4s_int<0x7B, "vfnmaddsd", sdmem, v2f64, sse_load_f64,
                              X86Fnmadds1>;
  defm VFNMSUBSD4 : fma4s<0x7F, "vfnmsubsd", FR64, f64mem, f64,
                          X86Fnmsub, loadf64>,
                    fma4s_int<0x7F, "vfnmsubsd", sdmem, v2f64, sse_load_f64,
                              X86Fnmsubs1>;
  // Packed Instructions
  defm VFMADDPD4 : fma4p<0x69, "vfmaddpd", X86Fmadd, v2f64, v4f64,
                         loadv2f64, loadv4f64>;
  defm VFMSUBPD4 : fma4p<0x6D, "vfmsubpd", X86Fmsub, v2f64, v4f64,
                         loadv2f64, loadv4f64>;
  defm VFNMADDPD4 : fma4p<0x79, "vfnmaddpd", X86Fnmadd, v2f64, v4f64,
                          loadv2f64, loadv4f64>;
  defm VFNMSUBPD4 : fma4p<0x7D, "vfnmsubpd", X86Fnmsub, v2f64, v4f64,
                          loadv2f64, loadv4f64>;
  defm VFMADDSUBPD4 : fma4p<0x5D, "vfmaddsubpd", X86Fmaddsub, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
  defm VFMSUBADDPD4 : fma4p<0x5F, "vfmsubaddpd", X86Fmsubadd, v2f64, v4f64,
                            loadv2f64, loadv4f64>;
}