// Group template arguments that can be derived from the vector type (EltNum x
// EltVT).  These are things like the register class for the writemask, etc.
// The idea is to pass one of these as the template argument rather than the
// individual arguments.
class X86VectorVTInfo<int numelts, ValueType EltVT, RegisterClass rc,
                      string suffix = ""> {
  RegisterClass RC = rc;
  int NumElts = numelts;

  // Corresponding mask register class.
  RegisterClass KRC = !cast<RegisterClass>("VK" # NumElts);

  // Corresponding write-mask register class.
  RegisterClass KRCWM = !cast<RegisterClass>("VK" # NumElts # "WM");

  // The GPR register class that can hold the write mask.  Use GR8 for fewer
  // than 8 elements.  Use shift-right and equal to work around the lack of
  // !lt in tablegen.
  RegisterClass MRC =
    !cast<RegisterClass>("GR" #
                         !if (!eq (!srl(NumElts, 3), 0), 8, NumElts));
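  // Illustrative note (derived from the expression above): !srl(NumElts, 3)
  // is zero exactly when NumElts < 8, so e.g. NumElts = 4 selects GR8 while
  // NumElts = 16 selects GR16.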

  // Suffix used in the instruction mnemonic.
  string Suffix = suffix;

  string VTName = "v" # NumElts # EltVT;

  // The vector VT.
  ValueType VT = !cast<ValueType>(VTName);

  string EltTypeName = !cast<string>(EltVT);
  // Size of the element type in bits, e.g. 32 for v16i32.
  string EltSizeName = !subst("i", "", !subst("f", "", EltTypeName));
  int EltSize = EltVT.Size;

  // "i" for integer types and "f" for floating-point types.
  string TypeVariantName = !subst(EltSizeName, "", EltTypeName);

  // Size of RC in bits, e.g. 512 for VR512.
  int Size = VT.Size;

  // The corresponding memory operand, e.g. i512mem for VR512.
  X86MemOperand MemOp = !cast<X86MemOperand>(TypeVariantName # Size # "mem");
  X86MemOperand ScalarMemOp = !cast<X86MemOperand>(EltVT # "mem");

  // Load patterns.
  // Note: for 128/256-bit integer VTs we choose loadv2i64/loadv4i64
  // because of load promotion during legalization.
  PatFrag LdFrag = !cast<PatFrag>("load" #
                                  !if (!eq (TypeVariantName, "i"),
                                       !if (!eq (Size, 128), "v2i64",
                                       !if (!eq (Size, 256), "v4i64",
                                            VTName)), VTName));
  PatFrag ScalarLdFrag = !cast<PatFrag>("load" # EltVT);
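  // Illustrative note (derived from LdFrag above): a 256-bit integer type
  // such as v8i32 ends up using loadv4i64, while 512-bit integer types and
  // all FP types use their own fragment, e.g. loadv16i32 or loadv16f32.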

  // Load patterns used for memory operands.  We only have this defined in
  // case of i64 element types for sub-512 integer vectors.  For now, keep
  // MemOpFrag undefined in these cases.
  PatFrag MemOpFrag =
    !if (!eq (TypeVariantName, "f"), !cast<PatFrag>("memop" # VTName),
    !if (!eq (EltTypeName, "i64"), !cast<PatFrag>("memop" # VTName),
    !if (!eq (VTName, "v16i32"), !cast<PatFrag>("memop" # VTName), ?)));

  // The corresponding float type, e.g. v16f32 for v16i32.
  // Note: for EltSize < 32, FloatVT is illegal and TableGen
  //       fails to compile, so we choose FloatVT = VT.
  ValueType FloatVT = !cast<ValueType>(
                        !if (!eq (!srl(EltSize,5),0),
                             VTName,
                             !if (!eq(TypeVariantName, "i"),
                                  "v" # NumElts # "f" # EltSize,
                                  VTName)));

  // The string to specify embedded broadcast in assembly.
  string BroadcastStr = "{1to" # NumElts # "}";

  // 8-bit compressed displacement tuple/subvector format.  This is only
  // defined for NumElts <= 8.
  CD8VForm CD8TupleForm = !if (!eq (!srl(NumElts, 4), 0),
                               !cast<CD8VForm>("CD8VT" # NumElts), ?);

  SubRegIndex SubRegIdx = !if (!eq (Size, 128), sub_xmm,
                          !if (!eq (Size, 256), sub_ymm, ?));

  Domain ExeDomain = !if (!eq (EltTypeName, "f32"), SSEPackedSingle,
                     !if (!eq (EltTypeName, "f64"), SSEPackedDouble,
                     SSEPackedInt));

  // A vector type of the same width with element type i32.  This is used to
  // create the canonical constant zero node ImmAllZerosV.
  ValueType i32VT = !cast<ValueType>("v" # !srl(Size, 5) # "i32");
  dag ImmAllZerosV = (VT (bitconvert (i32VT immAllZerosV)));
}
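
// Illustrative note (derived from the class above): the v16i32_info
// instantiation below ends up with NumElts = 16, EltSize = 32, Size = 512,
// KRC = VK16, KRCWM = VK16WM, MRC = GR16, MemOp = i512mem,
// LdFrag = loadv16i32, FloatVT = v16f32, BroadcastStr = "{1to16}" and
// ExeDomain = SSEPackedInt.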

def v64i8_info  : X86VectorVTInfo<64,  i8, VR512, "b">;
def v32i16_info : X86VectorVTInfo<32, i16, VR512, "w">;
def v16i32_info : X86VectorVTInfo<16, i32, VR512, "d">;
def v8i64_info  : X86VectorVTInfo<8,  i64, VR512, "q">;
def v16f32_info : X86VectorVTInfo<16, f32, VR512, "ps">;
def v8f64_info  : X86VectorVTInfo<8,  f64, VR512, "pd">;

// "x" in v32i8x_info means RC = VR256X
def v32i8x_info  : X86VectorVTInfo<32,  i8, VR256X, "b">;
def v16i16x_info : X86VectorVTInfo<16, i16, VR256X, "w">;
def v8i32x_info  : X86VectorVTInfo<8,  i32, VR256X, "d">;
def v4i64x_info  : X86VectorVTInfo<4,  i64, VR256X, "q">;
def v8f32x_info  : X86VectorVTInfo<8,  f32, VR256X, "ps">;
def v4f64x_info  : X86VectorVTInfo<4,  f64, VR256X, "pd">;

def v16i8x_info  : X86VectorVTInfo<16,  i8, VR128X, "b">;
def v8i16x_info  : X86VectorVTInfo<8,  i16, VR128X, "w">;
def v4i32x_info  : X86VectorVTInfo<4,  i32, VR128X, "d">;
def v2i64x_info  : X86VectorVTInfo<2,  i64, VR128X, "q">;
def v4f32x_info  : X86VectorVTInfo<4,  f32, VR128X, "ps">;
def v2f64x_info  : X86VectorVTInfo<2,  f64, VR128X, "pd">;

class AVX512VLVectorVTInfo<X86VectorVTInfo i512, X86VectorVTInfo i256,
                           X86VectorVTInfo i128> {
  X86VectorVTInfo info512 = i512;
  X86VectorVTInfo info256 = i256;
  X86VectorVTInfo info128 = i128;
}

def avx512vl_i8_info  : AVX512VLVectorVTInfo<v64i8_info, v32i8x_info,
                                             v16i8x_info>;
def avx512vl_i16_info : AVX512VLVectorVTInfo<v32i16_info, v16i16x_info,
                                             v8i16x_info>;
def avx512vl_i32_info : AVX512VLVectorVTInfo<v16i32_info, v8i32x_info,
                                             v4i32x_info>;
def avx512vl_i64_info : AVX512VLVectorVTInfo<v8i64_info, v4i64x_info,
                                             v2i64x_info>;
def avx512vl_f32_info : AVX512VLVectorVTInfo<v16f32_info, v8f32x_info,
                                             v4f32x_info>;
def avx512vl_f64_info : AVX512VLVectorVTInfo<v8f64_info, v4f64x_info,
                                             v2f64x_info>;

// This multiclass generates the masking variants from the non-masking
// variant.  It only provides the assembly pieces for the masking variants.
// It assumes custom ISel patterns for masking which can be provided as
// template arguments.
multiclass AVX512_maskable_custom<bits<8> O, Format F,
                                  dag Outs,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> Pattern,
                                  list<dag> MaskingPattern,
                                  list<dag> ZeroMaskingPattern,
                                  string Round = "",
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0> {
  let isCommutable = IsCommutable in
    def NAME: AVX512<O, F, Outs, Ins,
                     OpcodeStr#"\t{"#AttSrcAsm#", $dst "#Round#"|"#
                                   "$dst "#Round#", "#IntelSrcAsm#"}",
                     Pattern, itin>;

  // Prefer over VMOV*rrk Pat<>
  let AddedComplexity = 20 in
    def NAME#k: AVX512<O, F, Outs, MaskingIns,
                       OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}}"#Round#"|"#
                                     "$dst {${mask}}"#Round#", "#IntelSrcAsm#"}",
                       MaskingPattern, itin>,
              EVEX_K {
      // In case of the 3src subclass this is overridden with a let.
      string Constraints = MaskingConstraint;
    }
  let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
    def NAME#kz: AVX512<O, F, Outs, ZeroMaskingIns,
                        OpcodeStr#"\t{"#AttSrcAsm#", $dst {${mask}} {z}"#Round#"|"#
                                      "$dst {${mask}} {z}"#Round#", "#IntelSrcAsm#"}",
                        ZeroMaskingPattern,
                        itin>,
              EVEX_KZ;
}
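
// Illustrative note (derived from the multiclass above): for a given base
// name this expands to three defs, e.g. FOO (plain destructive form),
// FOOk (merge-masking, "$dst {${mask}}", constrained by MaskingConstraint)
// and FOOkz (zero-masking, "$dst {${mask}} {z}").  "FOO" here is just a
// placeholder name, not an actual instruction.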

// Common base class of AVX512_maskable and AVX512_maskable_3src.
multiclass AVX512_maskable_common<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Outs,
                                  dag Ins, dag MaskingIns, dag ZeroMaskingIns,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  dag RHS, dag MaskingRHS,
                                  string Round = "",
                                  string MaskingConstraint = "",
                                  InstrItinClass itin = NoItinerary,
                                  bit IsCommutable = 0> :
  AVX512_maskable_custom<O, F, Outs, Ins, MaskingIns, ZeroMaskingIns, OpcodeStr,
                         AttSrcAsm, IntelSrcAsm,
                         [(set _.RC:$dst, RHS)],
                         [(set _.RC:$dst, MaskingRHS)],
                         [(set _.RC:$dst,
                               (vselect _.KRCWM:$mask, RHS, _.ImmAllZerosV))],
                         Round, MaskingConstraint, NoItinerary, IsCommutable>;

// This multiclass generates the unconditional/non-masking, the masking and
// the zero-masking variant of the instruction.  In the masking case, the
// preserved vector elements come from a new dummy input operand tied to $dst.
multiclass AVX512_maskable<bits<8> O, Format F, X86VectorVTInfo _,
                           dag Outs, dag Ins, string OpcodeStr,
                           string AttSrcAsm, string IntelSrcAsm,
                           dag RHS, string Round = "",
                           InstrItinClass itin = NoItinerary,
                           bit IsCommutable = 0> :
   AVX512_maskable_common<O, F, _, Outs, Ins,
                          !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                          !con((ins _.KRCWM:$mask), Ins),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                          (vselect _.KRCWM:$mask, RHS, _.RC:$src0), Round,
                          "$src0 = $dst", itin, IsCommutable>;

// Similar to AVX512_maskable but in this case one of the source operands
// ($src1) is already tied to $dst so we just use that for the preserved
// vector elements.  NOTE that the NonTiedIns (the ins dag) should exclude
// $src1.
multiclass AVX512_maskable_3src<bits<8> O, Format F, X86VectorVTInfo _,
                                dag Outs, dag NonTiedIns, string OpcodeStr,
                                string AttSrcAsm, string IntelSrcAsm,
                                dag RHS> :
   AVX512_maskable_common<O, F, _, Outs,
                          !con((ins _.RC:$src1), NonTiedIns),
                          !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                          !con((ins _.RC:$src1, _.KRCWM:$mask), NonTiedIns),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, RHS,
                          (vselect _.KRCWM:$mask, RHS, _.RC:$src1)>;
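
// Illustrative note: compared with AVX512_maskable, no extra $src0 operand
// or constraint string is passed here; the tied $src1 register doubles as
// the merge-masking pass-through value.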

multiclass AVX512_maskable_in_asm<bits<8> O, Format F, X86VectorVTInfo _,
                                  dag Outs, dag Ins,
                                  string OpcodeStr,
                                  string AttSrcAsm, string IntelSrcAsm,
                                  list<dag> Pattern> :
   AVX512_maskable_custom<O, F, Outs, Ins,
                          !con((ins _.RC:$src0, _.KRCWM:$mask), Ins),
                          !con((ins _.KRCWM:$mask), Ins),
                          OpcodeStr, AttSrcAsm, IntelSrcAsm, Pattern, [], [], "",
                          "$src0 = $dst">;

// Bitcasts between 512-bit vector types.  Return the original type since
// no instruction is needed for the conversion.
let Predicates = [HasAVX512] in {
  def : Pat<(v8f64  (bitconvert (v8i64 VR512:$src))),  (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v16i32 VR512:$src))), (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v32i16 VR512:$src))), (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v64i8 VR512:$src))),  (v8f64 VR512:$src)>;
  def : Pat<(v8f64  (bitconvert (v16f32 VR512:$src))), (v8f64 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v16i32 VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v32i16 VR512:$src))), (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v16f32 (bitconvert (v8f64 VR512:$src))),  (v16f32 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v64i8 VR512:$src))),  (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v8f64 VR512:$src))),  (v8i64 VR512:$src)>;
  def : Pat<(v8i64  (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v16f32 VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v32i16 VR512:$src))), (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v16i32 (bitconvert (v8f64 VR512:$src))),  (v16i32 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))),  (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v16i32 VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))),  (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v8f64 VR512:$src))),  (v32i16 VR512:$src)>;
  def : Pat<(v32i16 (bitconvert (v16f32 VR512:$src))), (v32i16 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v8i64 VR512:$src))),  (v64i8 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v8f64 VR512:$src))),  (v64i8 VR512:$src)>;
  def : Pat<(v64i8  (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;

  def : Pat<(v2i64 (bitconvert (v4i32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v8i16 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v16i8 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v2f64 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v2i64 (bitconvert (v4f32 VR128X:$src))), (v2i64 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2i64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v8i16 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v16i8 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v2f64 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v4i32 (bitconvert (v4f32 VR128X:$src))), (v4i32 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2i64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4i32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v16i8 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v2f64 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v8i16 (bitconvert (v4f32 VR128X:$src))), (v8i16 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2i64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4i32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v8i16 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v2f64 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v16i8 (bitconvert (v4f32 VR128X:$src))), (v16i8 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2i64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v4i32 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v8i16 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v16i8 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v4f32 (bitconvert (v2f64 VR128X:$src))), (v4f32 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v2i64 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4i32 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v8i16 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v16i8 VR128X:$src))), (v2f64 VR128X:$src)>;
  def : Pat<(v2f64 (bitconvert (v4f32 VR128X:$src))), (v2f64 VR128X:$src)>;

  // Bitcasts between 256-bit vector types.  Return the original type since
  // no instruction is needed for the conversion.
  def : Pat<(v4f64  (bitconvert (v8f32 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v8i32 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v4i64 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v16i16 VR256X:$src))), (v4f64 VR256X:$src)>;
  def : Pat<(v4f64  (bitconvert (v32i8 VR256X:$src))),  (v4f64 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v8i32 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4i64 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v4f64 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v32i8 VR256X:$src))),  (v8f32 VR256X:$src)>;
  def : Pat<(v8f32  (bitconvert (v16i16 VR256X:$src))), (v8f32 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8f32 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v8i32 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v4f64 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v32i8 VR256X:$src))),  (v4i64 VR256X:$src)>;
  def : Pat<(v4i64  (bitconvert (v16i16 VR256X:$src))), (v4i64 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4f64 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v4i64 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8f32 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v8i32 VR256X:$src))),  (v32i8 VR256X:$src)>;
  def : Pat<(v32i8  (bitconvert (v16i16 VR256X:$src))), (v32i8 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v32i8 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v16i16 VR256X:$src))), (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v8f32 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4i64 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v8i32  (bitconvert (v4f64 VR256X:$src))),  (v8i32 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8f32 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v8i32 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4i64 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v4f64 VR256X:$src))),  (v16i16 VR256X:$src)>;
  def : Pat<(v16i16 (bitconvert (v32i8 VR256X:$src))),  (v16i16 VR256X:$src)>;
}

//
// AVX-512: the VPXOR instruction writes zero to its upper part, so it is safe
// to use it to build zeros.
//
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isPseudo = 1, Predicates = [HasAVX512] in {
def AVX512_512_SET0 : I<0, Pseudo, (outs VR512:$dst), (ins), "",
               [(set VR512:$dst, (v16f32 immAllZerosV))]>;
}
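
// Note (an assumption, not stated in this file): AVX512_512_SET0 is a pseudo
// that is expected to be expanded to a zeroing idiom such as a VPXOR-style
// register self-XOR after instruction selection; the patterns below let every
// 512-bit all-zeros constant reuse this single pseudo.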

let Predicates = [HasAVX512] in {
def : Pat<(v8i64 immAllZerosV), (AVX512_512_SET0)>;
def : Pat<(v16i32 immAllZerosV), (AVX512_512_SET0)>;
def : Pat<(v8f64 immAllZerosV), (AVX512_512_SET0)>;
}

//===----------------------------------------------------------------------===//
// AVX-512 - VECTOR INSERT
//

multiclass vinsert_for_size_no_alt<int Opcode,
                                   X86VectorVTInfo From, X86VectorVTInfo To,
                                   PatFrag vinsert_insert,
                                   SDNodeXForm INSERT_get_vinsert_imm> {
  let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
    def rr : AVX512AIi8<Opcode, MRMSrcReg, (outs VR512:$dst),
               (ins VR512:$src1, From.RC:$src2, i8imm:$src3),
               "vinsert" # From.EltTypeName # "x" # From.NumElts #
                                                "\t{$src3, $src2, $src1, $dst|"
                                                 "$dst, $src1, $src2, $src3}",
               [(set To.RC:$dst, (vinsert_insert:$src3 (To.VT VR512:$src1),
                                                       (From.VT From.RC:$src2),
                                                       (iPTR imm)))]>,
             EVEX_4V, EVEX_V512;

    let mayLoad = 1 in
    def rm : AVX512AIi8<Opcode, MRMSrcMem, (outs VR512:$dst),
               (ins VR512:$src1, From.MemOp:$src2, i8imm:$src3),
               "vinsert" # From.EltTypeName # "x" # From.NumElts #
                                                "\t{$src3, $src2, $src1, $dst|"
                                                 "$dst, $src1, $src2, $src3}",
               []>,
             EVEX_4V, EVEX_V512, EVEX_CD8<From.EltSize, From.CD8TupleForm>;
  }
}

multiclass vinsert_for_size<int Opcode,
                            X86VectorVTInfo From, X86VectorVTInfo To,
                            X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
                            PatFrag vinsert_insert,
                            SDNodeXForm INSERT_get_vinsert_imm> :
  vinsert_for_size_no_alt<Opcode, From, To,
                          vinsert_insert, INSERT_get_vinsert_imm> {
  // Codegen pattern with the alternative types, e.g. v2i64 -> v8i64 for
  // vinserti32x4.  Only add this if 64x2 and friends are not supported
  // natively via AVX512DQ.
  let Predicates = [NoDQI] in
    def : Pat<(vinsert_insert:$ins
                 (AltTo.VT VR512:$src1), (AltFrom.VT From.RC:$src2), (iPTR imm)),
              (AltTo.VT (!cast<Instruction>(NAME # From.EltSize # "x4rr")
                            VR512:$src1, From.RC:$src2,
                            (INSERT_get_vinsert_imm VR512:$ins)))>;
}

multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
                            ValueType EltVT64, int Opcode256> {
  defm NAME # "32x4" : vinsert_for_size<Opcode128,
                                        X86VectorVTInfo< 4, EltVT32, VR128X>,
                                        X86VectorVTInfo<16, EltVT32, VR512>,
                                        X86VectorVTInfo< 2, EltVT64, VR128X>,
                                        X86VectorVTInfo< 8, EltVT64, VR512>,
                                        vinsert128_insert,
                                        INSERT_get_vinsert128_imm>;
  let Predicates = [HasDQI] in
    defm NAME # "64x2" : vinsert_for_size_no_alt<Opcode128,
                                                 X86VectorVTInfo< 2, EltVT64, VR128X>,
                                                 X86VectorVTInfo< 8, EltVT64, VR512>,
                                                 vinsert128_insert,
                                                 INSERT_get_vinsert128_imm>, VEX_W;
  defm NAME # "64x4" : vinsert_for_size<Opcode256,
                                        X86VectorVTInfo< 4, EltVT64, VR256X>,
                                        X86VectorVTInfo< 8, EltVT64, VR512>,
                                        X86VectorVTInfo< 8, EltVT32, VR256>,
                                        X86VectorVTInfo<16, EltVT32, VR512>,
                                        vinsert256_insert,
                                        INSERT_get_vinsert256_imm>, VEX_W;
  let Predicates = [HasDQI] in
    defm NAME # "32x8" : vinsert_for_size_no_alt<Opcode256,
                                                 X86VectorVTInfo< 8, EltVT32, VR256X>,
                                                 X86VectorVTInfo<16, EltVT32, VR512>,
                                                 vinsert256_insert,
                                                 INSERT_get_vinsert256_imm>;
}

defm VINSERTF : vinsert_for_type<f32, 0x18, f64, 0x1a>;
defm VINSERTI : vinsert_for_type<i32, 0x38, i64, 0x3a>;
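
// Illustrative note (derived from the defm names above): these expand to
// instructions such as VINSERTF32x4rr/rm and VINSERTI64x4rr/rm, with the
// 64x2 and 32x8 forms additionally guarded by the HasDQI predicate.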

// vinsertps - insert f32 to XMM
def VINSERTPSzrr : AVX512AIi8<0x21, MRMSrcReg, (outs VR128X:$dst),
      (ins VR128X:$src1, VR128X:$src2, i8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1, VR128X:$src2, imm:$src3))]>,
      EVEX_4V;
def VINSERTPSzrm: AVX512AIi8<0x21, MRMSrcMem, (outs VR128X:$dst),
      (ins VR128X:$src1, f32mem:$src2, i8imm:$src3),
      "vinsertps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
      [(set VR128X:$dst, (X86insertps VR128X:$src1,
                (v4f32 (scalar_to_vector (loadf32 addr:$src2))),
                imm:$src3))]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;

//===----------------------------------------------------------------------===//
// AVX-512 VECTOR EXTRACT
//---

multiclass vextract_for_size<int Opcode,
                             X86VectorVTInfo From, X86VectorVTInfo To,
                             X86VectorVTInfo AltFrom, X86VectorVTInfo AltTo,
                             PatFrag vextract_extract,
                             SDNodeXForm EXTRACT_get_vextract_imm> {
  let hasSideEffects = 0, ExeDomain = To.ExeDomain in {
    defm rr : AVX512_maskable_in_asm<Opcode, MRMDestReg, To, (outs To.RC:$dst),
                (ins VR512:$src1, i8imm:$idx),
                "vextract" # To.EltTypeName # "x4",
                "$idx, $src1", "$src1, $idx",
                [(set To.RC:$dst, (vextract_extract:$idx (From.VT VR512:$src1),
                                                         (iPTR imm)))]>,
              AVX512AIi8Base, EVEX, EVEX_V512;
    let mayStore = 1 in
    def rm : AVX512AIi8<Opcode, MRMDestMem, (outs),
                (ins To.MemOp:$dst, VR512:$src1, i8imm:$src2),
                "vextract" # To.EltTypeName # "x4\t{$src2, $src1, $dst|"
                                                   "$dst, $src1, $src2}",
                []>, EVEX, EVEX_V512, EVEX_CD8<To.EltSize, CD8VT4>;
  }

  // Codegen pattern with the alternative types, e.g. v8i64 -> v2i64 for
  // vextracti32x4.
  def : Pat<(vextract_extract:$ext (AltFrom.VT VR512:$src1), (iPTR imm)),
            (AltTo.VT (!cast<Instruction>(NAME # To.EltSize # "x4rr")
                          VR512:$src1,
                          (EXTRACT_get_vextract_imm To.RC:$ext)))>;

  // A 128/256-bit subvector extract from the first 512-bit vector position is
  // a subregister copy that needs no instruction.
  def : Pat<(To.VT (extract_subvector (From.VT VR512:$src), (iPTR 0))),
            (To.VT
               (EXTRACT_SUBREG (From.VT VR512:$src), To.SubRegIdx))>;

  // And for the alternative types.
  def : Pat<(AltTo.VT (extract_subvector (AltFrom.VT VR512:$src), (iPTR 0))),
            (AltTo.VT
               (EXTRACT_SUBREG (AltFrom.VT VR512:$src), AltTo.SubRegIdx))>;

  // Intrinsic call with masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.RC:$src0, GR8:$mask),
            (!cast<Instruction>(NAME # To.EltSize # "x4rrk") To.RC:$src0,
                (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
                VR512:$src1, imm:$idx)>;

  // Intrinsic call with zero-masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, GR8:$mask),
            (!cast<Instruction>(NAME # To.EltSize # "x4rrkz")
                (v4i1 (COPY_TO_REGCLASS GR8:$mask, VK4WM)),
                VR512:$src1, imm:$idx)>;

  // Intrinsic call without masking.
  def : Pat<(!cast<Intrinsic>("int_x86_avx512_mask_vextract" # To.EltTypeName #
                              "x4_512")
              VR512:$src1, (iPTR imm:$idx), To.ImmAllZerosV, (i8 -1)),
            (!cast<Instruction>(NAME # To.EltSize # "x4rr")
                VR512:$src1, imm:$idx)>;
}

multiclass vextract_for_type<ValueType EltVT32, int Opcode32,
                             ValueType EltVT64, int Opcode64> {
  defm NAME # "32x4" : vextract_for_size<Opcode32,
                                         X86VectorVTInfo<16, EltVT32, VR512>,
                                         X86VectorVTInfo< 4, EltVT32, VR128X>,
                                         X86VectorVTInfo< 8, EltVT64, VR512>,
                                         X86VectorVTInfo< 2, EltVT64, VR128X>,
                                         vextract128_extract,
                                         EXTRACT_get_vextract128_imm>;
  defm NAME # "64x4" : vextract_for_size<Opcode64,
                                         X86VectorVTInfo< 8, EltVT64, VR512>,
                                         X86VectorVTInfo< 4, EltVT64, VR256X>,
                                         X86VectorVTInfo<16, EltVT32, VR512>,
                                         X86VectorVTInfo< 8, EltVT32, VR256>,
                                         vextract256_extract,
                                         EXTRACT_get_vextract256_imm>, VEX_W;
}

defm VEXTRACTF : vextract_for_type<f32, 0x19, f64, 0x1b>;
defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b>;
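
// Illustrative note (derived from the defm names above): these expand to
// VEXTRACTF32x4rr/rm, VEXTRACTF64x4rr/rm and the VEXTRACTI* counterparts,
// including the masked rrk/rrkz forms generated via AVX512_maskable_in_asm.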

// A 128-bit subvector insert to the first 512-bit vector position
// is a subregister copy that needs no instruction.
def : Pat<(insert_subvector undef, (v2i64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)),
          (INSERT_SUBREG (v4i64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
          sub_ymm)>;
def : Pat<(insert_subvector undef, (v2f64 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)),
          (INSERT_SUBREG (v4f64 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
          sub_ymm)>;
def : Pat<(insert_subvector undef, (v4i32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)),
          (INSERT_SUBREG (v8i32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
          sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f32 VR128X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)),
          (INSERT_SUBREG (v8f32 (IMPLICIT_DEF)), VR128X:$src, sub_xmm),
          sub_ymm)>;

def : Pat<(insert_subvector undef, (v4i64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8i64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v4f64 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v8f64 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8i32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;
def : Pat<(insert_subvector undef, (v8f32 VR256X:$src), (iPTR 0)),
          (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256X:$src, sub_ymm)>;

// vextractps - extract 32 bits from XMM
def VEXTRACTPSzrr : AVX512AIi8<0x17, MRMDestReg, (outs GR32:$dst),
      (ins VR128X:$src1, i32i8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(set GR32:$dst, (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2))]>,
      EVEX;

def VEXTRACTPSzmr : AVX512AIi8<0x17, MRMDestMem, (outs),
      (ins f32mem:$dst, VR128X:$src1, i32i8imm:$src2),
      "vextractps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
      [(store (extractelt (bc_v4i32 (v4f32 VR128X:$src1)), imm:$src2),
                          addr:$dst)]>, EVEX, EVEX_CD8<32, CD8VT1>;

//===---------------------------------------------------------------------===//
// AVX-512 BROADCAST
//---
multiclass avx512_fp_broadcast<bits<8> opc, SDNode OpNode, RegisterClass SrcRC,
                               ValueType svt, X86VectorVTInfo _> {
  defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                   (ins SrcRC:$src), "vbroadcast"## !subst("p", "s", _.Suffix),
                   "$src", "$src", (_.VT (OpNode (svt SrcRC:$src)))>,
                   T8PD, EVEX;

  let mayLoad = 1 in {
    defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                     (ins _.ScalarMemOp:$src),
                     "vbroadcast"##!subst("p", "s", _.Suffix), "$src", "$src",
                     (_.VT (OpNode (_.ScalarLdFrag addr:$src)))>,
                     T8PD, EVEX;
  }
}

multiclass avx512_fp_broadcast_vl<bits<8> opc, SDNode OpNode,
                                  AVX512VLVectorVTInfo _> {
  defm Z  : avx512_fp_broadcast<opc, OpNode, VR128X, _.info128.VT, _.info512>,
                             EVEX_V512;

  let Predicates = [HasVLX] in {
    defm Z256  : avx512_fp_broadcast<opc, OpNode, VR128X, _.info128.VT, _.info256>,
                                  EVEX_V256;
  }
}

let ExeDomain = SSEPackedSingle in {
  defm VBROADCASTSS : avx512_fp_broadcast_vl<0x18, X86VBroadcast,
                                       avx512vl_f32_info>, EVEX_CD8<32, CD8VT1>;
  let Predicates = [HasVLX] in {
    defm VBROADCASTSSZ128  : avx512_fp_broadcast<0x18, X86VBroadcast, VR128X,
                                     v4f32, v4f32x_info>, EVEX_V128,
                                     EVEX_CD8<32, CD8VT1>;
  }
}

let ExeDomain = SSEPackedDouble in {
  defm VBROADCASTSD : avx512_fp_broadcast_vl<0x19, X86VBroadcast,
                                       avx512vl_f64_info>, VEX_W, EVEX_CD8<64, CD8VT1>;
}
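
// Illustrative note (derived from the defm nesting above): the patterns
// below refer to names such as VBROADCASTSSZr and VBROADCASTSSZm, which are
// produced by combining the outer defm (VBROADCASTSS), the EVEX_V512 suffix
// Z and the inner AVX512_maskable defs r/m (plus their k/kz masked forms).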

def : Pat<(v16f32 (X86VBroadcast (loadf32 addr:$src))),
          (VBROADCASTSSZm addr:$src)>;
def : Pat<(v8f64 (X86VBroadcast (loadf64 addr:$src))),
          (VBROADCASTSDZm addr:$src)>;

def : Pat<(int_x86_avx512_vbroadcast_ss_512 addr:$src),
          (VBROADCASTSSZm addr:$src)>;
def : Pat<(int_x86_avx512_vbroadcast_sd_512 addr:$src),
          (VBROADCASTSDZm addr:$src)>;

multiclass avx512_int_broadcast_reg<bits<8> opc, string OpcodeStr,
                                    RegisterClass SrcRC, RegisterClass KRC> {
  def Zrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins SrcRC:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  []>, EVEX, EVEX_V512;
  def Zkrr : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst),
                   (ins KRC:$mask, SrcRC:$src),
                   !strconcat(OpcodeStr,
                        "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
                   []>, EVEX, EVEX_V512, EVEX_KZ;
}

defm VPBROADCASTDr  : avx512_int_broadcast_reg<0x7C, "vpbroadcastd", GR32, VK16WM>;
defm VPBROADCASTQr  : avx512_int_broadcast_reg<0x7C, "vpbroadcastq", GR64, VK8WM>,
                       VEX_W;

def : Pat <(v16i32 (X86vzext VK16WM:$mask)),
           (VPBROADCASTDrZkrr VK16WM:$mask, (i32 (MOV32ri 0x1)))>;

def : Pat <(v8i64 (X86vzext VK8WM:$mask)),
           (VPBROADCASTQrZkrr VK8WM:$mask, (i64 (MOV64ri 0x1)))>;

def : Pat<(v16i32 (X86VBroadcast (i32 GR32:$src))),
          (VPBROADCASTDrZrr GR32:$src)>;
def : Pat<(v16i32 (X86VBroadcastm VK16WM:$mask, (i32 GR32:$src))),
          (VPBROADCASTDrZkrr VK16WM:$mask, GR32:$src)>;
def : Pat<(v8i64 (X86VBroadcast (i64 GR64:$src))),
          (VPBROADCASTQrZrr GR64:$src)>;
def : Pat<(v8i64 (X86VBroadcastm VK8WM:$mask, (i64 GR64:$src))),
          (VPBROADCASTQrZkrr VK8WM:$mask, GR64:$src)>;

def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_i32_512 (i32 GR32:$src))),
          (VPBROADCASTDrZrr GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_i64_512 (i64 GR64:$src))),
          (VPBROADCASTQrZrr GR64:$src)>;

def : Pat<(v16i32 (int_x86_avx512_mask_pbroadcast_d_gpr_512 (i32 GR32:$src),
                   (v16i32 immAllZerosV), (i16 GR16:$mask))),
          (VPBROADCASTDrZkrr (COPY_TO_REGCLASS GR16:$mask, VK16WM), GR32:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pbroadcast_q_gpr_512 (i64 GR64:$src),
                   (bc_v8i64 (v16i32 immAllZerosV)), (i8 GR8:$mask))),
          (VPBROADCASTQrZkrr (COPY_TO_REGCLASS GR8:$mask, VK8WM), GR64:$src)>;

multiclass avx512_int_broadcast_rm<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          RegisterClass DstRC, ValueType OpVT, ValueType SrcVT,
                          RegisterClass KRC> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins VR128X:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set DstRC:$dst,
                    (OpVT (X86VBroadcast (SrcVT VR128X:$src))))]>, EVEX;
  def krr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst), (ins KRC:$mask,
                    VR128X:$src),
                    !strconcat(OpcodeStr,
                    "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                    [(set DstRC:$dst,
                      (OpVT (X86VBroadcastm KRC:$mask, (SrcVT VR128X:$src))))]>,
                      EVEX, EVEX_KZ;
  let mayLoad = 1 in {
  def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  [(set DstRC:$dst,
                    (OpVT (X86VBroadcast (ld_frag addr:$src))))]>, EVEX;
  def krm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst), (ins KRC:$mask,
                     x86memop:$src),
                  !strconcat(OpcodeStr,
                    "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                  [(set DstRC:$dst, (OpVT (X86VBroadcastm KRC:$mask,
                             (ld_frag addr:$src))))]>, EVEX, EVEX_KZ;
  }
}

defm VPBROADCASTDZ  : avx512_int_broadcast_rm<0x58, "vpbroadcastd", i32mem,
                      loadi32, VR512, v16i32, v4i32, VK16WM>,
                      EVEX_V512, EVEX_CD8<32, CD8VT1>;
defm VPBROADCASTQZ  : avx512_int_broadcast_rm<0x59, "vpbroadcastq", i64mem,
                      loadi64, VR512, v8i64, v2i64, VK8WM>,  EVEX_V512, VEX_W,
                      EVEX_CD8<64, CD8VT1>;

multiclass avx512_int_subvec_broadcast_rm<bits<8> opc, string OpcodeStr,
                          X86MemOperand x86memop, PatFrag ld_frag,
                          RegisterClass KRC> {
  let mayLoad = 1 in {
  def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins x86memop:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  []>, EVEX;
  def krm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst), (ins KRC:$mask,
                     x86memop:$src),
                  !strconcat(OpcodeStr,
                    "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                  []>, EVEX, EVEX_KZ;
  }
}

defm VBROADCASTI32X4 : avx512_int_subvec_broadcast_rm<0x5a, "vbroadcasti32x4",
                       i128mem, loadv2i64, VK16WM>,
                       EVEX_V512, EVEX_CD8<32, CD8VT4>;
defm VBROADCASTI64X4 : avx512_int_subvec_broadcast_rm<0x5b, "vbroadcasti64x4",
                       i256mem, loadv4i64, VK16WM>, VEX_W,
                       EVEX_V512, EVEX_CD8<64, CD8VT4>;

def : Pat<(v16i32 (int_x86_avx512_pbroadcastd_512 (v4i32 VR128X:$src))),
          (VPBROADCASTDZrr VR128X:$src)>;
def : Pat<(v8i64 (int_x86_avx512_pbroadcastq_512 (v2i64 VR128X:$src))),
          (VPBROADCASTQZrr VR128X:$src)>;

def : Pat<(v16f32 (X86VBroadcast (v16f32 VR512:$src))),
          (VBROADCASTSSZr (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm))>;
def : Pat<(v8f64 (X86VBroadcast (v8f64 VR512:$src))),
          (VBROADCASTSDZr (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm))>;

def : Pat<(v16i32 (X86VBroadcast (v16i32 VR512:$src))),
          (VPBROADCASTDZrr (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm))>;
def : Pat<(v8i64 (X86VBroadcast (v8i64 VR512:$src))),
          (VPBROADCASTQZrr (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm))>;

def : Pat<(v16f32 (int_x86_avx512_vbroadcast_ss_ps_512 (v4f32 VR128X:$src))),
          (VBROADCASTSSZr VR128X:$src)>;
def : Pat<(v8f64 (int_x86_avx512_vbroadcast_sd_pd_512 (v2f64 VR128X:$src))),
          (VBROADCASTSDZr VR128X:$src)>;

// Provide fallback in case the load node that is used in the patterns above
// is used by additional users, which prevents the pattern selection.
def : Pat<(v16f32 (X86VBroadcast FR32X:$src)),
          (VBROADCASTSSZr (COPY_TO_REGCLASS FR32X:$src, VR128X))>;
def : Pat<(v8f64 (X86VBroadcast FR64X:$src)),
          (VBROADCASTSDZr (COPY_TO_REGCLASS FR64X:$src, VR128X))>;

let Predicates = [HasAVX512] in {
def : Pat<(v8i32 (X86VBroadcastm (v8i1 VK8WM:$mask), (loadi32 addr:$src))),
          (EXTRACT_SUBREG
            (v16i32 (VPBROADCASTDZkrm (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
                                      addr:$src)), sub_ymm)>;
}

//===----------------------------------------------------------------------===//
// AVX-512 BROADCAST MASK TO VECTOR REGISTER
//---
multiclass avx512_mask_broadcast<bits<8> opc, string OpcodeStr,
                                 RegisterClass KRC> {
  let Predicates = [HasCDI] in
  def Zrr : AVX512XS8I<opc, MRMSrcReg, (outs VR512:$dst), (ins KRC:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  []>, EVEX, EVEX_V512;

  let Predicates = [HasCDI, HasVLX] in {
  def Z128rr : AVX512XS8I<opc, MRMSrcReg, (outs VR128:$dst), (ins KRC:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  []>, EVEX, EVEX_V128;
  def Z256rr : AVX512XS8I<opc, MRMSrcReg, (outs VR256:$dst), (ins KRC:$src),
                  !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                  []>, EVEX, EVEX_V256;
  }
}

let Predicates = [HasCDI] in {
defm VPBROADCASTMW2D : avx512_mask_broadcast<0x3A, "vpbroadcastmw2d",
                                             VK16>;
defm VPBROADCASTMB2Q : avx512_mask_broadcast<0x2A, "vpbroadcastmb2q",
                                             VK8>, VEX_W;
}

//===----------------------------------------------------------------------===//
// AVX-512 - VPERM
//
// -- immediate form --
multiclass avx512_perm_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           X86VectorVTInfo _> {
  let ExeDomain = _.ExeDomain in {
  def ri : AVX512AIi8<opc, MRMSrcReg, (outs _.RC:$dst),
                     (ins _.RC:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set _.RC:$dst,
                        (_.VT (OpNode _.RC:$src1, (i8 imm:$src2))))]>,
                     EVEX;
  def mi : AVX512AIi8<opc, MRMSrcMem, (outs _.RC:$dst),
                     (ins _.MemOp:$src1, i8imm:$src2),
                     !strconcat(OpcodeStr,
                         "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                     [(set _.RC:$dst,
                        (_.VT (OpNode (_.MemOpFrag addr:$src1),
                              (i8 imm:$src2))))]>,
           EVEX, EVEX_CD8<_.EltSize, CD8VF>;
  }
}

multiclass avx512_permil<bits<8> OpcImm, bits<8> OpcVar, X86VectorVTInfo _,
                         X86VectorVTInfo Ctrl> :
     avx512_perm_imm<OpcImm, "vpermil" # _.Suffix, X86VPermilpi, _> {
  let ExeDomain = _.ExeDomain in {
  def rr : AVX5128I<OpcVar, MRMSrcReg, (outs _.RC:$dst),
                  (ins _.RC:$src1, _.RC:$src2),
                  !strconcat("vpermil" # _.Suffix,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set _.RC:$dst,
                         (_.VT (X86VPermilpv _.RC:$src1,
                                             (Ctrl.VT Ctrl.RC:$src2))))]>,
                  EVEX_4V;
  def rm : AVX5128I<OpcVar, MRMSrcMem, (outs _.RC:$dst),
                  (ins _.RC:$src1, Ctrl.MemOp:$src2),
                  !strconcat("vpermil" # _.Suffix,
                             "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
                  [(set _.RC:$dst,
                         (_.VT (X86VPermilpv _.RC:$src1,
                                             (Ctrl.VT (Ctrl.MemOpFrag addr:$src2)))))]>,
                  EVEX_4V;
  }
}

defm VPERMQZ    : avx512_perm_imm<0x00, "vpermq", X86VPermi, v8i64_info>,
                  EVEX_V512, VEX_W;
defm VPERMPDZ   : avx512_perm_imm<0x01, "vpermpd", X86VPermi, v8f64_info>,
                  EVEX_V512, VEX_W;

defm VPERMILPSZ : avx512_permil<0x04, 0x0C, v16f32_info, v16i32_info>,
|
|
|
EVEX_V512;
|
2014-10-28 07:08:40 +08:00
|
|
|
defm VPERMILPDZ : avx512_permil<0x05, 0x0D, v8f64_info, v8i64_info>,
|
2014-10-28 07:08:37 +08:00
|
|
|
EVEX_V512, VEX_W;
|
2014-10-28 07:08:34 +08:00
|
|
|
|
|
|
|
def : Pat<(v16i32 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
|
|
|
|
(VPERMILPSZri VR512:$src1, imm:$imm)>;
|
|
|
|
def : Pat<(v8i64 (X86VPermilpi VR512:$src1, (i8 imm:$imm))),
|
|
|
|
(VPERMILPDZri VR512:$src1, imm:$imm)>;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
// -- VPERM - register form --
|
|
|
|
multiclass avx512_perm<bits<8> opc, string OpcodeStr, RegisterClass RC,
|
|
|
|
PatFrag mem_frag, X86MemOperand x86memop, ValueType OpVT> {
|
|
|
|
|
|
|
|
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
|
|
|
(OpVT (X86VPermv RC:$src1, RC:$src2)))]>, EVEX_4V;
|
|
|
|
|
|
|
|
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, x86memop:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
|
|
|
(OpVT (X86VPermv RC:$src1, (mem_frag addr:$src2))))]>,
|
|
|
|
EVEX_4V;
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VPERMDZ : avx512_perm<0x36, "vpermd", VR512, memopv16i32, i512mem,
|
|
|
|
v16i32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPERMQZ : avx512_perm<0x36, "vpermq", VR512, memopv8i64, i512mem,
|
|
|
|
v8i64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
let ExeDomain = SSEPackedSingle in
|
|
|
|
defm VPERMPSZ : avx512_perm<0x16, "vpermps", VR512, memopv16f32, f512mem,
|
|
|
|
v16f32>, EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
let ExeDomain = SSEPackedDouble in
|
|
|
|
defm VPERMPDZ : avx512_perm<0x16, "vpermpd", VR512, memopv8f64, f512mem,
|
|
|
|
v8f64>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
|
|
|
|
// -- VPERMI2 - 3 source operands form --
|
|
|
|
multiclass avx512_perm_3src<bits<8> opc, string OpcodeStr, RegisterClass RC,
|
|
|
|
PatFrag mem_frag, X86MemOperand x86memop,
|
2014-07-03 05:25:54 +08:00
|
|
|
SDNode OpNode, ValueType OpVT, RegisterClass KRC> {
|
2013-09-17 15:34:34 +08:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
|
|
|
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2, RC:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
2014-01-23 22:27:26 +08:00
|
|
|
(OpVT (OpNode RC:$src1, RC:$src2, RC:$src3)))]>,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_4V;
|
|
|
|
|
2014-07-03 05:25:54 +08:00
|
|
|
def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src3, $src2, $dst {${mask}}|"
|
2014-07-03 05:25:54 +08:00
|
|
|
"$dst {${mask}}, $src2, $src3}"),
|
|
|
|
[(set RC:$dst, (OpVT (vselect KRC:$mask,
|
|
|
|
(OpNode RC:$src1, RC:$src2,
|
|
|
|
RC:$src3),
|
|
|
|
RC:$src1)))]>,
|
|
|
|
EVEX_4V, EVEX_K;
|
|
|
|
|
|
|
|
let AddedComplexity = 30 in // Prefer over VMOV*rrkz Pat<>
|
|
|
|
def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, KRC:$mask, RC:$src2, RC:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src3, $src2, $dst {${mask}} {z} |",
|
2014-07-03 05:25:54 +08:00
|
|
|
"$dst {${mask}} {z}, $src2, $src3}"),
|
|
|
|
[(set RC:$dst, (OpVT (vselect KRC:$mask,
|
|
|
|
(OpNode RC:$src1, RC:$src2,
|
|
|
|
RC:$src3),
|
|
|
|
(OpVT (bitconvert
|
|
|
|
(v16i32 immAllZerosV))))))]>,
|
|
|
|
EVEX_4V, EVEX_KZ;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2, x86memop:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
2014-07-03 05:25:54 +08:00
|
|
|
(OpVT (OpNode RC:$src1, RC:$src2,
|
2013-09-17 15:34:34 +08:00
|
|
|
(mem_frag addr:$src3))))]>, EVEX_4V;
|
2014-07-03 05:25:54 +08:00
|
|
|
|
|
|
|
def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src3, $src2, $dst {${mask}}|"
|
2014-07-03 05:25:54 +08:00
|
|
|
"$dst {${mask}}, $src2, $src3}"),
|
|
|
|
[(set RC:$dst,
|
|
|
|
(OpVT (vselect KRC:$mask,
|
|
|
|
(OpNode RC:$src1, RC:$src2,
|
|
|
|
(mem_frag addr:$src3)),
|
|
|
|
RC:$src1)))]>,
|
|
|
|
EVEX_4V, EVEX_K;
|
|
|
|
|
|
|
|
let AddedComplexity = 10 in // Prefer over the rrkz variant
|
|
|
|
def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, KRC:$mask, RC:$src2, x86memop:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src3, $src2, $dst {${mask}} {z}|"
|
2014-07-03 05:25:54 +08:00
|
|
|
"$dst {${mask}} {z}, $src2, $src3}"),
|
|
|
|
[(set RC:$dst,
|
|
|
|
(OpVT (vselect KRC:$mask,
|
|
|
|
(OpNode RC:$src1, RC:$src2,
|
|
|
|
(mem_frag addr:$src3)),
|
|
|
|
(OpVT (bitconvert
|
|
|
|
(v16i32 immAllZerosV))))))]>,
|
|
|
|
EVEX_4V, EVEX_KZ;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
}
|
2014-07-03 05:25:54 +08:00
|
|
|
defm VPERMI2D : avx512_perm_3src<0x76, "vpermi2d", VR512, memopv16i32,
|
|
|
|
i512mem, X86VPermiv3, v16i32, VK16WM>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPERMI2Q : avx512_perm_3src<0x76, "vpermi2q", VR512, memopv8i64,
|
|
|
|
i512mem, X86VPermiv3, v8i64, VK8WM>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
defm VPERMI2PS : avx512_perm_3src<0x77, "vpermi2ps", VR512, memopv16f32,
|
|
|
|
i512mem, X86VPermiv3, v16f32, VK16WM>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPERMI2PD : avx512_perm_3src<0x77, "vpermi2pd", VR512, memopv8f64,
|
|
|
|
i512mem, X86VPermiv3, v8f64, VK8WM>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
2014-01-23 22:27:26 +08:00
|
|
|
|
2014-07-03 05:25:58 +08:00
|
|
|
multiclass avx512_perm_table_3src<bits<8> opc, string Suffix, RegisterClass RC,
|
|
|
|
PatFrag mem_frag, X86MemOperand x86memop,
|
2014-07-03 05:26:01 +08:00
|
|
|
SDNode OpNode, ValueType OpVT, RegisterClass KRC,
|
|
|
|
ValueType MaskVT, RegisterClass MRC> :
|
2014-07-03 05:25:58 +08:00
|
|
|
avx512_perm_3src<opc, "vpermt2"##Suffix, RC, mem_frag, x86memop, OpNode,
|
|
|
|
OpVT, KRC> {
|
|
|
|
def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
|
|
|
|
VR512:$idx, VR512:$src1, VR512:$src2, -1)),
|
|
|
|
(!cast<Instruction>(NAME#rr) VR512:$src1, VR512:$idx, VR512:$src2)>;
|
2014-07-03 05:26:01 +08:00
|
|
|
|
|
|
|
def : Pat<(OpVT (!cast<Intrinsic>("int_x86_avx512_mask_vpermt_"##Suffix##"_512")
|
|
|
|
VR512:$idx, VR512:$src1, VR512:$src2, MRC:$mask)),
|
|
|
|
(!cast<Instruction>(NAME#rrk) VR512:$src1,
|
|
|
|
(MaskVT (COPY_TO_REGCLASS MRC:$mask, KRC)), VR512:$idx, VR512:$src2)>;
|
2014-07-03 05:25:58 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
defm VPERMT2D : avx512_perm_table_3src<0x7E, "d", VR512, memopv16i32, i512mem,
|
2014-07-03 05:26:01 +08:00
|
|
|
X86VPermv3, v16i32, VK16WM, v16i1, GR16>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
2014-07-03 05:25:58 +08:00
|
|
|
defm VPERMT2Q : avx512_perm_table_3src<0x7E, "q", VR512, memopv8i64, i512mem,
|
2014-07-03 05:26:01 +08:00
|
|
|
X86VPermv3, v8i64, VK8WM, v8i1, GR8>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
2014-07-03 05:25:58 +08:00
|
|
|
defm VPERMT2PS : avx512_perm_table_3src<0x7F, "ps", VR512, memopv16f32, i512mem,
|
2014-07-03 05:26:01 +08:00
|
|
|
X86VPermv3, v16f32, VK16WM, v16i1, GR16>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
2014-07-03 05:25:58 +08:00
|
|
|
defm VPERMT2PD : avx512_perm_table_3src<0x7F, "pd", VR512, memopv8f64, i512mem,
|
2014-07-03 05:26:01 +08:00
|
|
|
X86VPermv3, v8f64, VK8WM, v8i1, GR8>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
2014-04-29 17:09:15 +08:00
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 - BLEND using mask
|
|
|
|
//
|
2014-01-08 18:54:22 +08:00
|
|
|
multiclass avx512_blendmask<bits<8> opc, string OpcodeStr,
|
2013-09-17 15:34:34 +08:00
|
|
|
RegisterClass KRC, RegisterClass RC,
|
|
|
|
X86MemOperand x86memop, PatFrag mem_frag,
|
|
|
|
SDNode OpNode, ValueType vt> {
|
|
|
|
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
|
2014-01-08 18:54:22 +08:00
|
|
|
(ins KRC:$mask, RC:$src1, RC:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
|
2014-01-08 18:54:22 +08:00
|
|
|
[(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
|
2013-11-05 03:14:56 +08:00
|
|
|
(vt RC:$src1)))]>, EVEX_4V, EVEX_K;
|
2014-01-08 18:54:22 +08:00
|
|
|
let mayLoad = 1 in
|
|
|
|
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins KRC:$mask, RC:$src1, x86memop:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
|
2014-01-08 18:54:22 +08:00
|
|
|
[]>, EVEX_4V, EVEX_K;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
let ExeDomain = SSEPackedSingle in
|
2013-11-05 03:14:56 +08:00
|
|
|
defm VBLENDMPSZ : avx512_blendmask<0x65, "vblendmps",
|
|
|
|
VK16WM, VR512, f512mem,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv16f32, vselect, v16f32>,
|
|
|
|
EVEX_CD8<32, CD8VF>, EVEX_V512;
|
|
|
|
let ExeDomain = SSEPackedDouble in
|
2013-11-05 03:14:56 +08:00
|
|
|
defm VBLENDMPDZ : avx512_blendmask<0x65, "vblendmpd",
|
|
|
|
VK8WM, VR512, f512mem,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv8f64, vselect, v8f64>,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
|
|
|
|
|
2014-01-08 18:54:22 +08:00
|
|
|
def : Pat<(v16f32 (int_x86_avx512_mask_blend_ps_512 (v16f32 VR512:$src1),
|
|
|
|
(v16f32 VR512:$src2), (i16 GR16:$mask))),
|
2014-01-13 20:55:03 +08:00
|
|
|
(VBLENDMPSZrr (COPY_TO_REGCLASS GR16:$mask, VK16WM),
|
2014-01-08 18:54:22 +08:00
|
|
|
VR512:$src1, VR512:$src2)>;
|
|
|
|
|
|
|
|
def : Pat<(v8f64 (int_x86_avx512_mask_blend_pd_512 (v8f64 VR512:$src1),
|
|
|
|
(v8f64 VR512:$src2), (i8 GR8:$mask))),
|
2014-01-13 20:55:03 +08:00
|
|
|
(VBLENDMPDZrr (COPY_TO_REGCLASS GR8:$mask, VK8WM),
|
2014-01-08 18:54:22 +08:00
|
|
|
VR512:$src1, VR512:$src2)>;
|
|
|
|
|
2013-11-05 03:14:56 +08:00
|
|
|
defm VPBLENDMDZ : avx512_blendmask<0x64, "vpblendmd",
|
|
|
|
VK16WM, VR512, f512mem,
|
|
|
|
memopv16i32, vselect, v16i32>,
|
|
|
|
EVEX_CD8<32, CD8VF>, EVEX_V512;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2013-11-05 03:14:56 +08:00
|
|
|
defm VPBLENDMQZ : avx512_blendmask<0x64, "vpblendmq",
|
|
|
|
VK8WM, VR512, f512mem,
|
|
|
|
memopv8i64, vselect, v8i64>,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VF>, EVEX_V512;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-01-08 18:54:22 +08:00
|
|
|
def : Pat<(v16i32 (int_x86_avx512_mask_blend_d_512 (v16i32 VR512:$src1),
|
|
|
|
(v16i32 VR512:$src2), (i16 GR16:$mask))),
|
|
|
|
(VPBLENDMDZrr (COPY_TO_REGCLASS GR16:$mask, VK16),
|
|
|
|
VR512:$src1, VR512:$src2)>;
|
|
|
|
|
|
|
|
def : Pat<(v8i64 (int_x86_avx512_mask_blend_q_512 (v8i64 VR512:$src1),
|
|
|
|
(v8i64 VR512:$src2), (i8 GR8:$mask))),
|
|
|
|
(VPBLENDMQZrr (COPY_TO_REGCLASS GR8:$mask, VK8),
|
|
|
|
VR512:$src1, VR512:$src2)>;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
let Predicates = [HasAVX512] in {
|
|
|
|
def : Pat<(v8f32 (vselect (v8i1 VK8WM:$mask), (v8f32 VR256X:$src1),
|
|
|
|
(v8f32 VR256X:$src2))),
|
|
|
|
(EXTRACT_SUBREG
|
|
|
|
(v16f32 (VBLENDMPSZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
|
|
|
|
(v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
|
|
|
|
(v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
|
|
|
|
|
|
|
|
def : Pat<(v8i32 (vselect (v8i1 VK8WM:$mask), (v8i32 VR256X:$src1),
|
|
|
|
(v8i32 VR256X:$src2))),
|
|
|
|
(EXTRACT_SUBREG
|
|
|
|
(v16i32 (VPBLENDMDZrr (COPY_TO_REGCLASS VK8WM:$mask, VK16WM),
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
|
|
|
|
}
|
2013-12-16 21:52:35 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Compare Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
// avx512_cmp_scalar - AVX512 CMPSS and CMPSD
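// Unlike the SSE/AVX forms, these compare a single scalar element and write
// the result into a one-bit mask register (VK1) rather than a vector.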
|
|
|
|
multiclass avx512_cmp_scalar<RegisterClass RC, X86MemOperand x86memop,
|
|
|
|
Operand CC, SDNode OpNode, ValueType VT,
|
|
|
|
PatFrag ld_frag, string asm, string asm_alt> {
|
|
|
|
def rr : AVX512Ii8<0xC2, MRMSrcReg,
|
|
|
|
(outs VK1:$dst), (ins RC:$src1, RC:$src2, CC:$cc), asm,
|
|
|
|
[(set VK1:$dst, (OpNode (VT RC:$src1), RC:$src2, imm:$cc))],
|
|
|
|
IIC_SSE_ALU_F32S_RR>, EVEX_4V;
|
|
|
|
def rm : AVX512Ii8<0xC2, MRMSrcMem,
|
|
|
|
(outs VK1:$dst), (ins RC:$src1, x86memop:$src2, CC:$cc), asm,
|
|
|
|
[(set VK1:$dst, (OpNode (VT RC:$src1),
|
|
|
|
(ld_frag addr:$src2), imm:$cc))], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
|
2014-01-05 12:55:55 +08:00
|
|
|
let isAsmParserOnly = 1, hasSideEffects = 0 in {
|
2013-12-16 21:52:35 +08:00
|
|
|
def rri_alt : AVX512Ii8<0xC2, MRMSrcReg,
|
|
|
|
(outs VK1:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
|
|
|
|
asm_alt, [], IIC_SSE_ALU_F32S_RR>, EVEX_4V;
|
|
|
|
def rmi_alt : AVX512Ii8<0xC2, MRMSrcMem,
|
|
|
|
(outs VK1:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
|
|
|
|
asm_alt, [], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
let Predicates = [HasAVX512] in {
|
|
|
|
defm VCMPSSZ : avx512_cmp_scalar<FR32X, f32mem, AVXCC, X86cmpms, f32, loadf32,
|
|
|
|
"vcmp${cc}ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
|
|
|
"vcmpss\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
|
|
|
|
XS;
|
|
|
|
defm VCMPSDZ : avx512_cmp_scalar<FR64X, f64mem, AVXCC, X86cmpms, f64, loadf64,
|
|
|
|
"vcmp${cc}sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
|
|
|
"vcmpsd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}">,
|
|
|
|
XD, VEX_W;
|
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-08-25 22:49:34 +08:00
|
|
|
multiclass avx512_icmp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
X86VectorVTInfo _> {
|
2013-09-17 15:34:34 +08:00
|
|
|
def rr : AVX512BI<opc, MRMSrcReg,
|
2014-08-25 22:49:34 +08:00
|
|
|
(outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
|
|
|
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2)))],
|
2013-09-17 15:34:34 +08:00
|
|
|
IIC_SSE_ALU_F32P_RR>, EVEX_4V;
|
2014-08-25 22:49:34 +08:00
|
|
|
let mayLoad = 1 in
|
2013-09-17 15:34:34 +08:00
|
|
|
def rm : AVX512BI<opc, MRMSrcMem,
|
2014-08-25 22:49:34 +08:00
|
|
|
(outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
|
|
|
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
|
|
|
|
(_.VT (bitconvert (_.LdFrag addr:$src2)))))],
|
2013-09-17 15:34:34 +08:00
|
|
|
IIC_SSE_ALU_F32P_RM>, EVEX_4V;
|
2014-08-25 22:49:34 +08:00
|
|
|
def rrk : AVX512BI<opc, MRMSrcReg,
|
|
|
|
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
|
|
|
|
"$dst {${mask}}, $src1, $src2}"),
|
|
|
|
[(set _.KRC:$dst, (and _.KRCWM:$mask,
|
|
|
|
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2))))],
|
|
|
|
IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
|
|
|
|
let mayLoad = 1 in
|
|
|
|
def rmk : AVX512BI<opc, MRMSrcMem,
|
|
|
|
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}}|",
|
|
|
|
"$dst {${mask}}, $src1, $src2}"),
|
|
|
|
[(set _.KRC:$dst, (and _.KRCWM:$mask,
|
|
|
|
(OpNode (_.VT _.RC:$src1),
|
|
|
|
(_.VT (bitconvert
|
|
|
|
(_.LdFrag addr:$src2))))))],
|
|
|
|
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_icmp_packed_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
2014-09-18 22:06:55 +08:00
|
|
|
X86VectorVTInfo _> :
|
|
|
|
avx512_icmp_packed<opc, OpcodeStr, OpNode, _> {
|
2014-08-25 22:49:34 +08:00
|
|
|
let mayLoad = 1 in {
|
|
|
|
def rmb : AVX512BI<opc, MRMSrcMem,
|
|
|
|
(outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2),
|
|
|
|
!strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr, ", $src1, $dst",
|
|
|
|
"|$dst, $src1, ${src2}", _.BroadcastStr, "}"),
|
|
|
|
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
|
|
|
|
(X86VBroadcast (_.ScalarLdFrag addr:$src2))))],
|
|
|
|
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
|
|
|
|
def rmbk : AVX512BI<opc, MRMSrcMem,
|
|
|
|
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
|
|
|
|
_.ScalarMemOp:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
|
|
|
|
"$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
|
|
|
|
[(set _.KRC:$dst, (and _.KRCWM:$mask,
|
|
|
|
(OpNode (_.VT _.RC:$src1),
|
|
|
|
(X86VBroadcast
|
|
|
|
(_.ScalarLdFrag addr:$src2)))))],
|
|
|
|
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_icmp_packed_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
AVX512VLVectorVTInfo VTInfo, Predicate prd> {
|
|
|
|
let Predicates = [prd] in
|
|
|
|
defm Z : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info512>,
|
|
|
|
EVEX_V512;
|
|
|
|
|
|
|
|
let Predicates = [prd, HasVLX] in {
|
|
|
|
defm Z256 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info256>,
|
|
|
|
EVEX_V256;
|
|
|
|
defm Z128 : avx512_icmp_packed<opc, OpcodeStr, OpNode, VTInfo.info128>,
|
|
|
|
EVEX_V128;
|
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
2014-08-25 22:49:34 +08:00
|
|
|
multiclass avx512_icmp_packed_rmb_vl<bits<8> opc, string OpcodeStr,
|
|
|
|
SDNode OpNode, AVX512VLVectorVTInfo VTInfo,
|
|
|
|
Predicate prd> {
|
|
|
|
let Predicates = [prd] in
|
|
|
|
defm Z : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info512>,
|
|
|
|
EVEX_V512;
|
|
|
|
|
|
|
|
let Predicates = [prd, HasVLX] in {
|
|
|
|
defm Z256 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info256>,
|
|
|
|
EVEX_V256;
|
|
|
|
defm Z128 : avx512_icmp_packed_rmb<opc, OpcodeStr, OpNode, VTInfo.info128>,
|
|
|
|
EVEX_V128;
|
|
|
|
}
|
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-08-25 22:49:34 +08:00
|
|
|
defm VPCMPEQB : avx512_icmp_packed_vl<0x74, "vpcmpeqb", X86pcmpeqm,
|
|
|
|
avx512vl_i8_info, HasBWI>,
|
|
|
|
EVEX_CD8<8, CD8VF>;
|
|
|
|
|
|
|
|
defm VPCMPEQW : avx512_icmp_packed_vl<0x75, "vpcmpeqw", X86pcmpeqm,
|
|
|
|
avx512vl_i16_info, HasBWI>,
|
|
|
|
EVEX_CD8<16, CD8VF>;
|
|
|
|
|
2014-09-18 22:06:55 +08:00
|
|
|
defm VPCMPEQD : avx512_icmp_packed_rmb_vl<0x76, "vpcmpeqd", X86pcmpeqm,
|
2014-08-25 22:49:34 +08:00
|
|
|
avx512vl_i32_info, HasAVX512>,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
|
2014-09-18 22:06:55 +08:00
|
|
|
defm VPCMPEQQ : avx512_icmp_packed_rmb_vl<0x29, "vpcmpeqq", X86pcmpeqm,
|
2014-08-25 22:49:34 +08:00
|
|
|
avx512vl_i64_info, HasAVX512>,
|
|
|
|
T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
|
|
|
|
defm VPCMPGTB : avx512_icmp_packed_vl<0x64, "vpcmpgtb", X86pcmpgtm,
|
|
|
|
avx512vl_i8_info, HasBWI>,
|
|
|
|
EVEX_CD8<8, CD8VF>;
|
|
|
|
|
|
|
|
defm VPCMPGTW : avx512_icmp_packed_vl<0x65, "vpcmpgtw", X86pcmpgtm,
|
|
|
|
avx512vl_i16_info, HasBWI>,
|
|
|
|
EVEX_CD8<16, CD8VF>;
|
|
|
|
|
2014-09-18 22:06:55 +08:00
|
|
|
defm VPCMPGTD : avx512_icmp_packed_rmb_vl<0x66, "vpcmpgtd", X86pcmpgtm,
|
2014-08-25 22:49:34 +08:00
|
|
|
avx512vl_i32_info, HasAVX512>,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
|
2014-09-18 22:06:55 +08:00
|
|
|
defm VPCMPGTQ : avx512_icmp_packed_rmb_vl<0x37, "vpcmpgtq", X86pcmpgtm,
|
2014-08-25 22:49:34 +08:00
|
|
|
avx512vl_i64_info, HasAVX512>,
|
|
|
|
T8PD, VEX_W, EVEX_CD8<64, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
def : Pat<(v8i1 (X86pcmpgtm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
|
2014-08-27 17:34:37 +08:00
|
|
|
(COPY_TO_REGCLASS (VPCMPGTDZrr
|
2013-09-17 15:34:34 +08:00
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
|
|
|
|
|
|
|
|
def : Pat<(v8i1 (X86pcmpeqm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2))),
|
2014-08-27 17:34:37 +08:00
|
|
|
(COPY_TO_REGCLASS (VPCMPEQDZrr
|
2013-09-17 15:34:34 +08:00
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm))), VK8)>;
|
|
|
|
|
2014-08-27 17:34:37 +08:00
|
|
|
multiclass avx512_icmp_cc<bits<8> opc, string Suffix, SDNode OpNode,
|
|
|
|
X86VectorVTInfo _> {
|
2013-09-17 15:34:34 +08:00
|
|
|
def rri : AVX512AIi8<opc, MRMSrcReg,
|
2014-08-27 17:34:37 +08:00
|
|
|
(outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, AVXCC:$cc),
|
2014-07-02 02:03:43 +08:00
|
|
|
!strconcat("vpcmp${cc}", Suffix,
|
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2014-08-27 17:34:37 +08:00
|
|
|
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
|
|
|
|
imm:$cc))],
|
2013-09-17 15:34:34 +08:00
|
|
|
IIC_SSE_ALU_F32P_RR>, EVEX_4V;
|
2014-08-27 17:34:37 +08:00
|
|
|
let mayLoad = 1 in
|
2013-09-17 15:34:34 +08:00
|
|
|
def rmi : AVX512AIi8<opc, MRMSrcMem,
|
2014-08-27 17:34:37 +08:00
|
|
|
(outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, AVXCC:$cc),
|
2014-07-02 02:03:43 +08:00
|
|
|
!strconcat("vpcmp${cc}", Suffix,
|
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2014-08-27 17:34:37 +08:00
|
|
|
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
|
|
|
|
(_.VT (bitconvert (_.LdFrag addr:$src2))),
|
|
|
|
imm:$cc))],
|
|
|
|
IIC_SSE_ALU_F32P_RM>, EVEX_4V;
|
|
|
|
def rrik : AVX512AIi8<opc, MRMSrcReg,
|
|
|
|
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
|
|
|
|
AVXCC:$cc),
|
|
|
|
!strconcat("vpcmp${cc}", Suffix,
|
|
|
|
"\t{$src2, $src1, $dst {${mask}}|",
|
|
|
|
"$dst {${mask}}, $src1, $src2}"),
|
|
|
|
[(set _.KRC:$dst, (and _.KRCWM:$mask,
|
|
|
|
(OpNode (_.VT _.RC:$src1), (_.VT _.RC:$src2),
|
|
|
|
imm:$cc)))],
|
|
|
|
IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
|
|
|
|
let mayLoad = 1 in
|
|
|
|
def rmik : AVX512AIi8<opc, MRMSrcMem,
|
|
|
|
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
|
|
|
|
AVXCC:$cc),
|
|
|
|
!strconcat("vpcmp${cc}", Suffix,
|
|
|
|
"\t{$src2, $src1, $dst {${mask}}|",
|
|
|
|
"$dst {${mask}}, $src1, $src2}"),
|
|
|
|
[(set _.KRC:$dst, (and _.KRCWM:$mask,
|
|
|
|
(OpNode (_.VT _.RC:$src1),
|
|
|
|
(_.VT (bitconvert (_.LdFrag addr:$src2))),
|
|
|
|
imm:$cc)))],
|
|
|
|
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
// Accept explicit immediate argument form instead of comparison code.
|
2014-01-05 12:55:55 +08:00
|
|
|
let isAsmParserOnly = 1, hasSideEffects = 0 in {
|
2013-09-17 15:34:34 +08:00
|
|
|
def rri_alt : AVX512AIi8<opc, MRMSrcReg,
|
2014-08-27 17:34:37 +08:00
|
|
|
(outs _.KRC:$dst), (ins _.RC:$src1, _.RC:$src2, i8imm:$cc),
|
|
|
|
!strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
|
|
|
|
"$dst, $src1, $src2, $cc}"),
|
2014-07-02 02:03:43 +08:00
|
|
|
[], IIC_SSE_ALU_F32P_RR>, EVEX_4V;
|
2014-08-27 17:34:37 +08:00
|
|
|
def rmi_alt : AVX512AIi8<opc, MRMSrcMem,
|
|
|
|
(outs _.KRC:$dst), (ins _.RC:$src1, _.MemOp:$src2, i8imm:$cc),
|
|
|
|
!strconcat("vpcmp", Suffix, "\t{$cc, $src2, $src1, $dst|",
|
|
|
|
"$dst, $src1, $src2, $cc}"),
|
|
|
|
[], IIC_SSE_ALU_F32P_RM>, EVEX_4V;
|
2014-07-02 02:03:45 +08:00
|
|
|
def rrik_alt : AVX512AIi8<opc, MRMSrcReg,
|
2014-08-27 17:34:37 +08:00
|
|
|
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.RC:$src2,
|
|
|
|
i8imm:$cc),
|
2014-07-02 02:03:45 +08:00
|
|
|
!strconcat("vpcmp", Suffix,
|
2014-08-27 17:34:37 +08:00
|
|
|
"\t{$cc, $src2, $src1, $dst {${mask}}|",
|
|
|
|
"$dst {${mask}}, $src1, $src2, $cc}"),
|
2014-07-02 02:03:45 +08:00
|
|
|
[], IIC_SSE_ALU_F32P_RR>, EVEX_4V, EVEX_K;
|
|
|
|
def rmik_alt : AVX512AIi8<opc, MRMSrcMem,
|
2014-08-27 17:34:37 +08:00
|
|
|
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1, _.MemOp:$src2,
|
|
|
|
i8imm:$cc),
|
2014-07-02 02:03:45 +08:00
|
|
|
!strconcat("vpcmp", Suffix,
|
2014-08-27 17:34:37 +08:00
|
|
|
"\t{$cc, $src2, $src1, $dst {${mask}}|",
|
|
|
|
"$dst {${mask}}, $src1, $src2, $cc}"),
|
2014-07-02 02:03:45 +08:00
|
|
|
[], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-08-27 17:34:37 +08:00
|
|
|
multiclass avx512_icmp_cc_rmb<bits<8> opc, string Suffix, SDNode OpNode,
|
2014-09-18 22:06:55 +08:00
|
|
|
X86VectorVTInfo _> :
|
|
|
|
avx512_icmp_cc<opc, Suffix, OpNode, _> {
|
2014-08-27 17:34:37 +08:00
|
|
|
let mayLoad = 1 in {
|
|
|
|
def rmib : AVX512AIi8<opc, MRMSrcMem,
|
|
|
|
(outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
|
|
|
|
AVXCC:$cc),
|
|
|
|
!strconcat("vpcmp${cc}", Suffix,
|
|
|
|
"\t{${src2}", _.BroadcastStr, ", $src1, $dst|",
|
|
|
|
"$dst, $src1, ${src2}", _.BroadcastStr, "}"),
|
|
|
|
[(set _.KRC:$dst, (OpNode (_.VT _.RC:$src1),
|
|
|
|
(X86VBroadcast (_.ScalarLdFrag addr:$src2)),
|
|
|
|
imm:$cc))],
|
|
|
|
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
|
|
|
|
def rmibk : AVX512AIi8<opc, MRMSrcMem,
|
|
|
|
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
|
|
|
|
_.ScalarMemOp:$src2, AVXCC:$cc),
|
|
|
|
!strconcat("vpcmp${cc}", Suffix,
|
|
|
|
"\t{${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
|
|
|
|
"$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, "}"),
|
|
|
|
[(set _.KRC:$dst, (and _.KRCWM:$mask,
|
|
|
|
(OpNode (_.VT _.RC:$src1),
|
|
|
|
(X86VBroadcast (_.ScalarLdFrag addr:$src2)),
|
|
|
|
imm:$cc)))],
|
|
|
|
IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Accept explicit immediate argument form instead of comparison code.
|
|
|
|
let isAsmParserOnly = 1, hasSideEffects = 0 in {
|
|
|
|
def rmib_alt : AVX512AIi8<opc, MRMSrcMem,
|
|
|
|
(outs _.KRC:$dst), (ins _.RC:$src1, _.ScalarMemOp:$src2,
|
|
|
|
i8imm:$cc),
|
|
|
|
!strconcat("vpcmp", Suffix,
|
|
|
|
"\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst|",
|
|
|
|
"$dst, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
|
|
|
|
[], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_B;
|
|
|
|
def rmibk_alt : AVX512AIi8<opc, MRMSrcMem,
|
|
|
|
(outs _.KRC:$dst), (ins _.KRCWM:$mask, _.RC:$src1,
|
|
|
|
_.ScalarMemOp:$src2, i8imm:$cc),
|
|
|
|
!strconcat("vpcmp", Suffix,
|
|
|
|
"\t{$cc, ${src2}", _.BroadcastStr, ", $src1, $dst {${mask}}|",
|
|
|
|
"$dst {${mask}}, $src1, ${src2}", _.BroadcastStr, ", $cc}"),
|
|
|
|
[], IIC_SSE_ALU_F32P_RM>, EVEX_4V, EVEX_K, EVEX_B;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_icmp_cc_vl<bits<8> opc, string Suffix, SDNode OpNode,
|
|
|
|
AVX512VLVectorVTInfo VTInfo, Predicate prd> {
|
|
|
|
let Predicates = [prd] in
|
|
|
|
defm Z : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info512>, EVEX_V512;
|
|
|
|
|
|
|
|
let Predicates = [prd, HasVLX] in {
|
|
|
|
defm Z256 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info256>, EVEX_V256;
|
|
|
|
defm Z128 : avx512_icmp_cc<opc, Suffix, OpNode, VTInfo.info128>, EVEX_V128;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_icmp_cc_rmb_vl<bits<8> opc, string Suffix, SDNode OpNode,
|
|
|
|
AVX512VLVectorVTInfo VTInfo, Predicate prd> {
|
|
|
|
let Predicates = [prd] in
|
|
|
|
defm Z : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info512>,
|
|
|
|
EVEX_V512;
|
|
|
|
|
|
|
|
let Predicates = [prd, HasVLX] in {
|
|
|
|
defm Z256 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info256>,
|
|
|
|
EVEX_V256;
|
|
|
|
defm Z128 : avx512_icmp_cc_rmb<opc, Suffix, OpNode, VTInfo.info128>,
|
|
|
|
EVEX_V128;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VPCMPB : avx512_icmp_cc_vl<0x3F, "b", X86cmpm, avx512vl_i8_info,
|
|
|
|
HasBWI>, EVEX_CD8<8, CD8VF>;
|
|
|
|
defm VPCMPUB : avx512_icmp_cc_vl<0x3E, "ub", X86cmpmu, avx512vl_i8_info,
|
|
|
|
HasBWI>, EVEX_CD8<8, CD8VF>;
|
|
|
|
|
|
|
|
defm VPCMPW : avx512_icmp_cc_vl<0x3F, "w", X86cmpm, avx512vl_i16_info,
|
|
|
|
HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
|
|
|
|
defm VPCMPUW : avx512_icmp_cc_vl<0x3E, "uw", X86cmpmu, avx512vl_i16_info,
|
|
|
|
HasBWI>, VEX_W, EVEX_CD8<16, CD8VF>;
|
|
|
|
|
2014-09-18 22:06:55 +08:00
|
|
|
defm VPCMPD : avx512_icmp_cc_rmb_vl<0x1F, "d", X86cmpm, avx512vl_i32_info,
|
2014-08-27 17:34:37 +08:00
|
|
|
HasAVX512>, EVEX_CD8<32, CD8VF>;
|
2014-09-18 22:06:55 +08:00
|
|
|
defm VPCMPUD : avx512_icmp_cc_rmb_vl<0x1E, "ud", X86cmpmu, avx512vl_i32_info,
|
2014-08-27 17:34:37 +08:00
|
|
|
HasAVX512>, EVEX_CD8<32, CD8VF>;
|
|
|
|
|
2014-09-18 22:06:55 +08:00
|
|
|
defm VPCMPQ : avx512_icmp_cc_rmb_vl<0x1F, "q", X86cmpm, avx512vl_i64_info,
|
2014-08-27 17:34:37 +08:00
|
|
|
HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
|
2014-09-18 22:06:55 +08:00
|
|
|
defm VPCMPUQ : avx512_icmp_cc_rmb_vl<0x1E, "uq", X86cmpmu, avx512vl_i64_info,
|
2014-08-27 17:34:37 +08:00
|
|
|
HasAVX512>, VEX_W, EVEX_CD8<64, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-06-26 08:21:12 +08:00
|
|
|
// avx512_cmp_packed - compare packed instructions
|
2013-09-17 15:34:34 +08:00
|
|
|
multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
|
2014-01-01 23:12:34 +08:00
|
|
|
X86MemOperand x86memop, ValueType vt,
|
|
|
|
string suffix, Domain d> {
|
2013-09-17 15:34:34 +08:00
|
|
|
def rri : AVX512PIi8<0xC2, MRMSrcReg,
|
2014-01-01 23:12:34 +08:00
|
|
|
(outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
|
|
|
|
!strconcat("vcmp${cc}", suffix,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2014-01-01 23:12:34 +08:00
|
|
|
[(set KRC:$dst, (X86cmpm (vt RC:$src1), (vt RC:$src2), imm:$cc))], d>;
|
|
|
|
def rrib: AVX512PIi8<0xC2, MRMSrcReg,
|
2014-01-13 20:55:03 +08:00
|
|
|
(outs KRC:$dst), (ins RC:$src1, RC:$src2, AVXCC:$cc),
|
2014-01-01 23:12:34 +08:00
|
|
|
!strconcat("vcmp${cc}", suffix,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
|
2014-01-01 23:12:34 +08:00
|
|
|
[], d>, EVEX_B;
|
2013-09-17 15:34:34 +08:00
|
|
|
def rmi : AVX512PIi8<0xC2, MRMSrcMem,
|
2014-01-01 23:12:34 +08:00
|
|
|
(outs KRC:$dst), (ins RC:$src1, x86memop:$src2, AVXCC:$cc),
|
2014-01-23 22:27:26 +08:00
|
|
|
!strconcat("vcmp${cc}", suffix,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2, $cc}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set KRC:$dst,
|
2014-01-01 23:12:34 +08:00
|
|
|
(X86cmpm (vt RC:$src1), (memop addr:$src2), imm:$cc))], d>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
// Accept explicit immediate argument form instead of comparison code.
|
2014-01-05 12:55:55 +08:00
|
|
|
let isAsmParserOnly = 1, hasSideEffects = 0 in {
|
2013-10-09 12:24:38 +08:00
|
|
|
def rri_alt : AVX512PIi8<0xC2, MRMSrcReg,
|
2014-06-26 08:21:12 +08:00
|
|
|
(outs KRC:$dst), (ins RC:$src1, RC:$src2, i8imm:$cc),
|
2014-01-01 23:12:34 +08:00
|
|
|
!strconcat("vcmp", suffix,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
|
2013-10-09 12:24:38 +08:00
|
|
|
def rmi_alt : AVX512PIi8<0xC2, MRMSrcMem,
|
2014-06-26 08:21:12 +08:00
|
|
|
(outs KRC:$dst), (ins RC:$src1, x86memop:$src2, i8imm:$cc),
|
2014-01-01 23:12:34 +08:00
|
|
|
!strconcat("vcmp", suffix,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}"), [], d>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-01 23:12:34 +08:00
|
|
|
defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
|
2014-02-18 08:21:49 +08:00
|
|
|
"ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
|
2014-02-01 16:17:56 +08:00
|
|
|
EVEX_CD8<32, CD8VF>;
|
2014-01-01 23:12:34 +08:00
|
|
|
defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
|
2014-01-14 15:41:20 +08:00
|
|
|
"pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
|
|
|
|
def : Pat<(v8i1 (X86cmpm (v8f32 VR256X:$src1), (v8f32 VR256X:$src2), imm:$cc)),
|
|
|
|
(COPY_TO_REGCLASS (VCMPPSZrri
|
|
|
|
(v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
|
|
|
|
(v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
|
|
|
|
imm:$cc), VK8)>;
|
|
|
|
def : Pat<(v8i1 (X86cmpm (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
|
|
|
|
(COPY_TO_REGCLASS (VPCMPDZrri
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
|
|
|
|
imm:$cc), VK8)>;
|
|
|
|
def : Pat<(v8i1 (X86cmpmu (v8i32 VR256X:$src1), (v8i32 VR256X:$src2), imm:$cc)),
|
|
|
|
(COPY_TO_REGCLASS (VPCMPUDZrri
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)),
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src2, sub_ymm)),
|
|
|
|
imm:$cc), VK8)>;
|
2014-01-01 23:12:34 +08:00
|
|
|
|
|
|
|
def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
|
|
|
|
(v16f32 VR512:$src2), imm:$cc, (i16 -1),
|
|
|
|
FROUND_NO_EXC)),
|
|
|
|
(COPY_TO_REGCLASS (VCMPPSZrrib VR512:$src1, VR512:$src2,
|
2014-01-13 20:55:03 +08:00
|
|
|
(I8Imm imm:$cc)), GR16)>;
|
2014-01-01 23:12:34 +08:00
|
|
|
|
|
|
|
def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
|
|
|
|
(v8f64 VR512:$src2), imm:$cc, (i8 -1),
|
|
|
|
FROUND_NO_EXC)),
|
|
|
|
(COPY_TO_REGCLASS (VCMPPDZrrib VR512:$src1, VR512:$src2,
|
2014-01-13 20:55:03 +08:00
|
|
|
(I8Imm imm:$cc)), GR8)>;
|
2014-01-01 23:12:34 +08:00
|
|
|
|
|
|
|
def : Pat<(i16 (int_x86_avx512_mask_cmp_ps_512 (v16f32 VR512:$src1),
|
|
|
|
(v16f32 VR512:$src2), imm:$cc, (i16 -1),
|
|
|
|
FROUND_CURRENT)),
|
|
|
|
(COPY_TO_REGCLASS (VCMPPSZrri VR512:$src1, VR512:$src2,
|
|
|
|
(I8Imm imm:$cc)), GR16)>;
|
|
|
|
|
|
|
|
def : Pat<(i8 (int_x86_avx512_mask_cmp_pd_512 (v8f64 VR512:$src1),
|
|
|
|
(v8f64 VR512:$src2), imm:$cc, (i8 -1),
|
|
|
|
FROUND_CURRENT)),
|
|
|
|
(COPY_TO_REGCLASS (VCMPPDZrri VR512:$src1, VR512:$src2,
|
|
|
|
(I8Imm imm:$cc)), GR8)>;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
// Mask register copy, including
|
|
|
|
// - copy between mask registers
|
|
|
|
// - load/store mask registers
|
|
|
|
// - copy from GPR to mask register and vice versa
|
|
|
|
//
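// Roughly, the forms generated here correspond to (AT&T syntax):
//   kmovw %k1, %k2       (mask -> mask)
//   kmovw (%rdi), %k1    (load mask)
//   kmovw %k1, (%rdi)    (store mask)
//   kmovw %eax, %k1      (GPR -> mask)
//   kmovw %k1, %eax      (mask -> GPR)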
|
|
|
|
multiclass avx512_mask_mov<bits<8> opc_kk, bits<8> opc_km, bits<8> opc_mk,
|
|
|
|
string OpcodeStr, RegisterClass KRC,
|
2014-07-23 22:49:42 +08:00
|
|
|
ValueType vvt, ValueType ivt, X86MemOperand x86memop> {
|
2014-01-05 22:21:07 +08:00
|
|
|
let hasSideEffects = 0 in {
|
2013-09-17 15:34:34 +08:00
|
|
|
def kk : I<opc_kk, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
|
2013-09-17 15:34:34 +08:00
|
|
|
let mayLoad = 1 in
|
|
|
|
def km : I<opc_km, MRMSrcMem, (outs KRC:$dst), (ins x86memop:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2014-07-23 22:49:42 +08:00
|
|
|
[(set KRC:$dst, (vvt (bitconvert (ivt (load addr:$src)))))]>;
|
2013-09-17 15:34:34 +08:00
|
|
|
let mayStore = 1 in
|
|
|
|
def mk : I<opc_mk, MRMDestMem, (outs), (ins x86memop:$dst, KRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,
|
|
|
|
string OpcodeStr,
|
|
|
|
RegisterClass KRC, RegisterClass GRC> {
|
2014-01-05 22:21:07 +08:00
|
|
|
let hasSideEffects = 0 in {
|
2013-09-17 15:34:34 +08:00
|
|
|
def kr : I<opc_kr, MRMSrcReg, (outs KRC:$dst), (ins GRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def rk : I<opc_rk, MRMSrcReg, (outs GRC:$dst), (ins KRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-23 22:49:42 +08:00
|
|
|
let Predicates = [HasDQI] in
|
|
|
|
defm KMOVB : avx512_mask_mov<0x90, 0x90, 0x91, "kmovb", VK8, v8i1, i8,
|
|
|
|
i8mem>,
|
|
|
|
avx512_mask_mov_gpr<0x92, 0x93, "kmovb", VK8, GR32>,
|
|
|
|
VEX, PD;
|
|
|
|
|
|
|
|
let Predicates = [HasAVX512] in
|
|
|
|
defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16,
|
|
|
|
i16mem>,
|
|
|
|
avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
|
2014-02-18 08:21:49 +08:00
|
|
|
VEX, PS;
|
2014-07-23 22:49:42 +08:00
|
|
|
|
|
|
|
let Predicates = [HasBWI] in {
|
|
|
|
defm KMOVD : avx512_mask_mov<0x90, 0x90, 0x91, "kmovd", VK32, v32i1, i32,
|
|
|
|
i32mem>, VEX, PD, VEX_W;
|
|
|
|
defm KMOVD : avx512_mask_mov_gpr<0x92, 0x93, "kmovd", VK32, GR32>,
|
|
|
|
VEX, XD;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
2014-07-23 22:49:42 +08:00
|
|
|
let Predicates = [HasBWI] in {
|
|
|
|
defm KMOVQ : avx512_mask_mov<0x90, 0x90, 0x91, "kmovq", VK64, v64i1, i64,
|
|
|
|
i64mem>, VEX, PS, VEX_W;
|
|
|
|
defm KMOVQ : avx512_mask_mov_gpr<0x92, 0x93, "kmovq", VK64, GR64>,
|
|
|
|
VEX, XD, VEX_W;
|
|
|
|
}
|
|
|
|
|
|
|
|
// GR from/to mask register
|
|
|
|
let Predicates = [HasDQI] in {
|
|
|
|
def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
|
|
|
|
(KMOVBkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit))>;
|
|
|
|
def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
|
|
|
|
(EXTRACT_SUBREG (KMOVBrk VK8:$src), sub_8bit)>;
|
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
let Predicates = [HasAVX512] in {
|
|
|
|
def : Pat<(v16i1 (bitconvert (i16 GR16:$src))),
|
|
|
|
(KMOVWkr (SUBREG_TO_REG (i32 0), GR16:$src, sub_16bit))>;
|
|
|
|
def : Pat<(i16 (bitconvert (v16i1 VK16:$src))),
|
|
|
|
(EXTRACT_SUBREG (KMOVWrk VK16:$src), sub_16bit)>;
|
2014-07-23 22:49:42 +08:00
|
|
|
}
|
|
|
|
let Predicates = [HasBWI] in {
|
|
|
|
def : Pat<(v32i1 (bitconvert (i32 GR32:$src))), (KMOVDkr GR32:$src)>;
|
|
|
|
def : Pat<(i32 (bitconvert (v32i1 VK32:$src))), (KMOVDrk VK32:$src)>;
|
|
|
|
}
|
|
|
|
let Predicates = [HasBWI] in {
|
|
|
|
def : Pat<(v64i1 (bitconvert (i64 GR64:$src))), (KMOVQkr GR64:$src)>;
|
|
|
|
def : Pat<(i64 (bitconvert (v64i1 VK64:$src))), (KMOVQrk VK64:$src)>;
|
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-07-23 22:49:42 +08:00
|
|
|
// Load/store kreg
|
|
|
|
let Predicates = [HasDQI] in {
|
|
|
|
def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
|
|
|
|
(KMOVBmk addr:$dst, VK8:$src)>;
|
|
|
|
}
|
|
|
|
let Predicates = [HasAVX512] in {
|
|
|
|
def : Pat<(store (i16 (bitconvert (v16i1 VK16:$src))), addr:$dst),
|
2013-09-17 15:34:34 +08:00
|
|
|
(KMOVWmk addr:$dst, VK16:$src)>;
|
2014-07-23 22:49:42 +08:00
|
|
|
def : Pat<(store (i8 (bitconvert (v8i1 VK8:$src))), addr:$dst),
|
2013-12-16 21:52:35 +08:00
|
|
|
(KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK8:$src, VK16))>;
|
|
|
|
def : Pat<(i1 (load addr:$src)),
|
|
|
|
(COPY_TO_REGCLASS (KMOVWkm addr:$src), VK1)>;
|
2014-07-23 22:49:42 +08:00
|
|
|
def : Pat<(v8i1 (bitconvert (i8 (load addr:$src)))),
|
2013-12-16 21:52:35 +08:00
|
|
|
(COPY_TO_REGCLASS (KMOVWkm addr:$src), VK8)>;
|
2014-07-23 22:49:42 +08:00
|
|
|
}
|
|
|
|
let Predicates = [HasBWI] in {
|
|
|
|
def : Pat<(store (i32 (bitconvert (v32i1 VK32:$src))), addr:$dst),
|
|
|
|
(KMOVDmk addr:$dst, VK32:$src)>;
|
|
|
|
}
|
|
|
|
let Predicates = [HasBWI] in {
|
|
|
|
def : Pat<(store (i64 (bitconvert (v64i1 VK64:$src))), addr:$dst),
|
|
|
|
(KMOVQmk addr:$dst, VK64:$src)>;
|
|
|
|
}
|
2013-12-17 16:33:15 +08:00
|
|
|
|
2014-07-23 22:49:42 +08:00
|
|
|
let Predicates = [HasAVX512] in {
|
2014-08-18 19:59:06 +08:00
|
|
|
def : Pat<(i1 (trunc (i64 GR64:$src))),
|
|
|
|
(COPY_TO_REGCLASS (KMOVWkr (AND32ri (EXTRACT_SUBREG $src, sub_32bit),
|
|
|
|
(i32 1))), VK1)>;
|
|
|
|
|
2013-12-24 22:24:07 +08:00
|
|
|
def : Pat<(i1 (trunc (i32 GR32:$src))),
|
2014-02-20 14:34:39 +08:00
|
|
|
(COPY_TO_REGCLASS (KMOVWkr (AND32ri $src, (i32 1))), VK1)>;
|
2013-12-24 22:24:07 +08:00
|
|
|
|
|
|
|
def : Pat<(i1 (trunc (i8 GR8:$src))),
|
2014-02-20 14:34:39 +08:00
|
|
|
(COPY_TO_REGCLASS
|
|
|
|
(KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit), (i32 1))),
|
|
|
|
VK1)>;
|
|
|
|
def : Pat<(i1 (trunc (i16 GR16:$src))),
|
|
|
|
(COPY_TO_REGCLASS
|
|
|
|
(KMOVWkr (AND32ri (SUBREG_TO_REG (i32 0), $src, sub_16bit), (i32 1))),
|
|
|
|
VK1)>;
|
2014-07-23 22:49:42 +08:00
|
|
|
|
2014-02-23 22:28:35 +08:00
|
|
|
def : Pat<(i32 (zext VK1:$src)),
|
|
|
|
(AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1))>;
|
2013-12-24 22:24:07 +08:00
|
|
|
def : Pat<(i8 (zext VK1:$src)),
|
|
|
|
(EXTRACT_SUBREG
|
2014-02-23 22:28:35 +08:00
|
|
|
(AND32ri (KMOVWrk
|
|
|
|
(COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)), sub_8bit)>;
|
2014-01-13 20:55:03 +08:00
|
|
|
def : Pat<(i64 (zext VK1:$src)),
|
2014-02-23 22:28:35 +08:00
|
|
|
(AND64ri8 (SUBREG_TO_REG (i64 0),
|
|
|
|
(KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), sub_32bit), (i64 1))>;
|
2014-02-17 15:29:33 +08:00
|
|
|
def : Pat<(i16 (zext VK1:$src)),
|
|
|
|
(EXTRACT_SUBREG
|
2014-02-23 22:28:35 +08:00
|
|
|
(AND32ri (KMOVWrk (COPY_TO_REGCLASS VK1:$src, VK16)), (i32 1)),
|
|
|
|
sub_16bit)>;
|
2014-04-09 20:37:50 +08:00
|
|
|
def : Pat<(v16i1 (scalar_to_vector VK1:$src)),
|
|
|
|
(COPY_TO_REGCLASS VK1:$src, VK16)>;
|
|
|
|
def : Pat<(v8i1 (scalar_to_vector VK1:$src)),
|
|
|
|
(COPY_TO_REGCLASS VK1:$src, VK8)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
2014-07-23 22:49:42 +08:00
|
|
|
let Predicates = [HasBWI] in {
|
|
|
|
def : Pat<(v32i1 (scalar_to_vector VK1:$src)),
|
|
|
|
(COPY_TO_REGCLASS VK1:$src, VK32)>;
|
|
|
|
def : Pat<(v64i1 (scalar_to_vector VK1:$src)),
|
|
|
|
(COPY_TO_REGCLASS VK1:$src, VK64)>;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
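// (That is, v8i1 values are copied into VK16 and the 16-bit "W" instruction
// forms are used, as the COPY_TO_REGCLASS patterns below show.)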
|
|
|
|
let Predicates = [HasAVX512] in {
|
|
|
|
// GR from/to 8-bit mask without native support
|
|
|
|
def : Pat<(v8i1 (bitconvert (i8 GR8:$src))),
|
|
|
|
(COPY_TO_REGCLASS
|
|
|
|
(KMOVWkr (SUBREG_TO_REG (i32 0), GR8:$src, sub_8bit)),
|
|
|
|
VK8)>;
|
|
|
|
def : Pat<(i8 (bitconvert (v8i1 VK8:$src))),
|
|
|
|
(EXTRACT_SUBREG
|
|
|
|
(KMOVWrk (COPY_TO_REGCLASS VK8:$src, VK16)),
|
|
|
|
sub_8bit)>;
|
2013-12-16 21:52:35 +08:00
|
|
|
|
2014-02-10 15:02:39 +08:00
|
|
|
def : Pat<(i1 (X86Vextract VK16:$src, (iPTR 0))),
|
2013-12-16 21:52:35 +08:00
|
|
|
(COPY_TO_REGCLASS VK16:$src, VK1)>;
|
2014-02-10 15:02:39 +08:00
|
|
|
def : Pat<(i1 (X86Vextract VK8:$src, (iPTR 0))),
|
2013-12-16 21:52:35 +08:00
|
|
|
(COPY_TO_REGCLASS VK8:$src, VK1)>;
|
2014-07-23 22:49:42 +08:00
|
|
|
}
|
|
|
|
let Predicates = [HasBWI] in {
|
|
|
|
def : Pat<(i1 (X86Vextract VK32:$src, (iPTR 0))),
|
|
|
|
(COPY_TO_REGCLASS VK32:$src, VK1)>;
|
|
|
|
def : Pat<(i1 (X86Vextract VK64:$src, (iPTR 0))),
|
|
|
|
(COPY_TO_REGCLASS VK64:$src, VK1)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Mask unary operation
|
|
|
|
// - KNOT
|
|
|
|
multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
|
2014-07-23 22:49:42 +08:00
|
|
|
RegisterClass KRC, SDPatternOperator OpNode,
|
|
|
|
Predicate prd> {
|
|
|
|
let Predicates = [prd] in
|
2013-09-17 15:34:34 +08:00
|
|
|
def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set KRC:$dst, (OpNode KRC:$src))]>;
|
|
|
|
}
|
|
|
|
|
2014-07-23 22:49:42 +08:00
|
|
|
multiclass avx512_mask_unop_all<bits<8> opc, string OpcodeStr,
|
|
|
|
SDPatternOperator OpNode> {
|
|
|
|
defm B : avx512_mask_unop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
|
|
|
|
HasDQI>, VEX, PD;
|
|
|
|
defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
|
|
|
|
HasAVX512>, VEX, PS;
|
|
|
|
defm D : avx512_mask_unop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
|
|
|
|
HasBWI>, VEX, PD, VEX_W;
|
|
|
|
defm Q : avx512_mask_unop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
|
|
|
|
HasBWI>, VEX, PS, VEX_W;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
2014-07-23 22:49:42 +08:00
|
|
|
defm KNOT : avx512_mask_unop_all<0x44, "knot", not>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2013-12-10 21:53:10 +08:00
|
|
|
multiclass avx512_mask_unop_int<string IntName, string InstName> {
|
|
|
|
let Predicates = [HasAVX512] in
|
|
|
|
def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
|
|
|
|
(i16 GR16:$src)),
|
|
|
|
(COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
|
|
|
|
(v16i1 (COPY_TO_REGCLASS GR16:$src, VK16))), GR16)>;
|
|
|
|
}
|
|
|
|
defm : avx512_mask_unop_int<"knot", "KNOT">;
|
|
|
|
|
2014-07-23 22:49:42 +08:00
|
|
|
let Predicates = [HasDQI] in
|
|
|
|
def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)), (KNOTBrr VK8:$src1)>;
|
|
|
|
let Predicates = [HasAVX512] in
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(xor VK16:$src1, (v16i1 immAllOnesV)), (KNOTWrr VK16:$src1)>;
|
2014-07-23 22:49:42 +08:00
|
|
|
let Predicates = [HasBWI] in
|
|
|
|
def : Pat<(xor VK32:$src1, (v32i1 immAllOnesV)), (KNOTDrr VK32:$src1)>;
|
|
|
|
let Predicates = [HasBWI] in
|
|
|
|
def : Pat<(xor VK64:$src1, (v64i1 immAllOnesV)), (KNOTQrr VK64:$src1)>;
|
|
|
|
|
|
|
|
// KNL does not support KMOVB, 8-bit mask is promoted to 16-bit
|
|
|
|
let Predicates = [HasAVX512] in {
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(xor VK8:$src1, (v8i1 immAllOnesV)),
|
|
|
|
(COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$src1, VK16)), VK8)>;
|
|
|
|
|
|
|
|
def : Pat<(not VK8:$src),
|
|
|
|
(COPY_TO_REGCLASS
|
|
|
|
(KNOTWrr (COPY_TO_REGCLASS VK8:$src, VK16)), VK8)>;
|
2014-07-23 22:49:42 +08:00
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
// Mask binary operation
|
2013-12-10 21:53:10 +08:00
|
|
|
// - KAND, KANDN, KOR, KXNOR, KXOR
|
2013-09-17 15:34:34 +08:00
|
|
|
multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
|
2014-07-28 21:46:45 +08:00
|
|
|
RegisterClass KRC, SDPatternOperator OpNode,
|
|
|
|
Predicate prd> {
|
|
|
|
let Predicates = [prd] in
|
2013-09-17 15:34:34 +08:00
|
|
|
def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set KRC:$dst, (OpNode KRC:$src1, KRC:$src2))]>;
|
|
|
|
}
|
|
|
|
|
2014-07-28 21:46:45 +08:00
|
|
|
multiclass avx512_mask_binop_all<bits<8> opc, string OpcodeStr,
|
|
|
|
SDPatternOperator OpNode> {
|
|
|
|
defm B : avx512_mask_binop<opc, !strconcat(OpcodeStr, "b"), VK8, OpNode,
|
|
|
|
HasDQI>, VEX_4V, VEX_L, PD;
|
|
|
|
defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode,
|
|
|
|
HasAVX512>, VEX_4V, VEX_L, PS;
|
|
|
|
defm D : avx512_mask_binop<opc, !strconcat(OpcodeStr, "d"), VK32, OpNode,
|
|
|
|
HasBWI>, VEX_4V, VEX_L, VEX_W, PD;
|
|
|
|
defm Q : avx512_mask_binop<opc, !strconcat(OpcodeStr, "q"), VK64, OpNode,
|
|
|
|
HasBWI>, VEX_4V, VEX_L, VEX_W, PS;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
|
|
|
|
def xnor : PatFrag<(ops node:$i0, node:$i1), (not (xor node:$i0, node:$i1))>;
|
|
|
|
|
|
|
|
let isCommutable = 1 in {
|
2014-07-28 21:46:45 +08:00
|
|
|
defm KAND : avx512_mask_binop_all<0x41, "kand", and>;
|
|
|
|
defm KOR : avx512_mask_binop_all<0x45, "kor", or>;
|
|
|
|
defm KXNOR : avx512_mask_binop_all<0x46, "kxnor", xnor>;
|
|
|
|
defm KXOR : avx512_mask_binop_all<0x47, "kxor", xor>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
2014-07-28 21:46:45 +08:00
|
|
|
let isCommutable = 0 in
|
|
|
|
defm KANDN : avx512_mask_binop_all<0x42, "kandn", andn>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2013-12-16 21:52:35 +08:00
|
|
|
def : Pat<(xor VK1:$src1, VK1:$src2),
|
|
|
|
(COPY_TO_REGCLASS (KXORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
|
|
|
|
(COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
|
|
|
|
|
|
|
|
def : Pat<(or VK1:$src1, VK1:$src2),
|
|
|
|
(COPY_TO_REGCLASS (KORWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
|
|
|
|
(COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
|
|
|
|
|
2013-12-25 18:06:40 +08:00
|
|
|
def : Pat<(and VK1:$src1, VK1:$src2),
|
|
|
|
(COPY_TO_REGCLASS (KANDWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
|
|
|
|
(COPY_TO_REGCLASS VK1:$src2, VK16)), VK1)>;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
multiclass avx512_mask_binop_int<string IntName, string InstName> {
|
|
|
|
let Predicates = [HasAVX512] in
|
2013-12-10 21:53:10 +08:00
|
|
|
def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_w")
|
|
|
|
(i16 GR16:$src1), (i16 GR16:$src2)),
|
|
|
|
(COPY_TO_REGCLASS (!cast<Instruction>(InstName##"Wrr")
|
|
|
|
(v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
|
|
|
|
(v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
defm : avx512_mask_binop_int<"kand", "KAND">;
|
|
|
|
defm : avx512_mask_binop_int<"kandn", "KANDN">;
|
|
|
|
defm : avx512_mask_binop_int<"kor", "KOR">;
|
|
|
|
defm : avx512_mask_binop_int<"kxnor", "KXNOR">;
|
|
|
|
defm : avx512_mask_binop_int<"kxor", "KXOR">;
|
2013-12-10 21:53:10 +08:00
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
// With AVX-512, 8-bit mask is promoted to 16-bit mask.
|
|
|
|
multiclass avx512_binop_pat<SDPatternOperator OpNode, Instruction Inst> {
|
|
|
|
let Predicates = [HasAVX512] in
|
|
|
|
def : Pat<(OpNode VK8:$src1, VK8:$src2),
|
|
|
|
(COPY_TO_REGCLASS
|
|
|
|
(Inst (COPY_TO_REGCLASS VK8:$src1, VK16),
|
|
|
|
(COPY_TO_REGCLASS VK8:$src2, VK16)), VK8)>;
|
|
|
|
}
|
|
|
|
|
|
|
|
defm : avx512_binop_pat<and, KANDWrr>;
|
|
|
|
defm : avx512_binop_pat<andn, KANDNWrr>;
|
|
|
|
defm : avx512_binop_pat<or, KORWrr>;
|
|
|
|
defm : avx512_binop_pat<xnor, KXNORWrr>;
|
|
|
|
defm : avx512_binop_pat<xor, KXORWrr>;
|
|
|
|
|
|
|
|
// Mask unpacking
|
|
|
|
multiclass avx512_mask_unpck<bits<8> opc, string OpcodeStr,
|
2013-12-10 21:53:10 +08:00
|
|
|
RegisterClass KRC> {
|
2013-09-17 15:34:34 +08:00
|
|
|
let Predicates = [HasAVX512] in
|
2013-12-10 21:53:10 +08:00
|
|
|
def rr : I<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src1, KRC:$src2),
|
2013-09-17 15:34:34 +08:00
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_mask_unpck_bw<bits<8> opc, string OpcodeStr> {
|
2013-12-10 21:53:10 +08:00
|
|
|
defm BW : avx512_mask_unpck<opc, !strconcat(OpcodeStr, "bw"), VK16>,
|
2014-01-14 15:41:20 +08:00
|
|
|
VEX_4V, VEX_L, PD;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
defm KUNPCK : avx512_mask_unpck_bw<0x4b, "kunpck">;
|
2013-12-17 16:33:15 +08:00
|
|
|
def : Pat<(v16i1 (concat_vectors (v8i1 VK8:$src1), (v8i1 VK8:$src2))),
|
|
|
|
(KUNPCKBWrr (COPY_TO_REGCLASS VK8:$src2, VK16),
|
|
|
|
(COPY_TO_REGCLASS VK8:$src1, VK16))>;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
multiclass avx512_mask_unpck_int<string IntName, string InstName> {
|
|
|
|
let Predicates = [HasAVX512] in
|
2013-12-10 21:53:10 +08:00
|
|
|
def : Pat<(!cast<Intrinsic>("int_x86_avx512_"##IntName##"_bw")
|
|
|
|
(i16 GR16:$src1), (i16 GR16:$src2)),
|
|
|
|
(COPY_TO_REGCLASS (!cast<Instruction>(InstName##"BWrr")
|
|
|
|
(v16i1 (COPY_TO_REGCLASS GR16:$src1, VK16)),
|
|
|
|
(v16i1 (COPY_TO_REGCLASS GR16:$src2, VK16))), GR16)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
2013-12-10 21:53:10 +08:00
|
|
|
defm : avx512_mask_unpck_int<"kunpck", "KUNPCK">;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
// Mask bit testing
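// kortestw ORs the two mask operands and only updates EFLAGS: ZF is set when
// the result is all zeros and CF when it is all ones.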
|
|
|
|
multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
|
|
|
|
SDNode OpNode> {
|
|
|
|
let Predicates = [HasAVX512], Defs = [EFLAGS] in
|
|
|
|
def rr : I<opc, MRMSrcReg, (outs), (ins KRC:$src1, KRC:$src2),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set EFLAGS, (OpNode KRC:$src1, KRC:$src2))]>;
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
|
|
|
|
defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
|
2014-02-18 08:21:49 +08:00
|
|
|
VEX, PS;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
|
2013-12-16 21:52:35 +08:00
|
|
|
|
2013-12-24 22:24:07 +08:00
|
|
|
def : Pat<(X86cmp VK1:$src1, (i1 0)),
|
2013-12-16 21:52:35 +08:00
|
|
|
(KORTESTWrr (COPY_TO_REGCLASS VK1:$src1, VK16),
|
2013-12-24 22:24:07 +08:00
|
|
|
(COPY_TO_REGCLASS VK1:$src1, VK16))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
// Mask shift
|
|
|
|
multiclass avx512_mask_shiftop<bits<8> opc, string OpcodeStr, RegisterClass KRC,
|
|
|
|
SDNode OpNode> {
|
|
|
|
let Predicates = [HasAVX512] in
|
|
|
|
def ri : Ii8<opc, MRMSrcReg, (outs KRC:$dst), (ins KRC:$src, i8imm:$imm),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$imm, $src, $dst|$dst, $src, $imm}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set KRC:$dst, (OpNode KRC:$src, (i8 imm:$imm)))]>;
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_mask_shiftop_w<bits<8> opc1, bits<8> opc2, string OpcodeStr,
|
|
|
|
SDNode OpNode> {
|
|
|
|
defm W : avx512_mask_shiftop<opc1, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
|
2014-01-14 15:41:20 +08:00
|
|
|
VEX, TAPD, VEX_W;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
2013-12-16 21:52:35 +08:00
|
|
|
defm KSHIFTL : avx512_mask_shiftop_w<0x32, 0x33, "kshiftl", X86vshli>;
|
|
|
|
defm KSHIFTR : avx512_mask_shiftop_w<0x30, 0x31, "kshiftr", X86vsrli>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
// Mask setting all 0s or 1s

multiclass avx512_mask_setop<RegisterClass KRC, ValueType VT, PatFrag Val> {
  let Predicates = [HasAVX512] in
    let isReMaterializable = 1, isAsCheapAsAMove = 1, isPseudo = 1 in
      def #NAME# : I<0, Pseudo, (outs KRC:$dst), (ins), "",
                     [(set KRC:$dst, (VT Val))]>;
}

multiclass avx512_mask_setop_w<PatFrag Val> {
  defm B : avx512_mask_setop<VK8,  v8i1,  Val>;
  defm W : avx512_mask_setop<VK16, v16i1, Val>;
}

defm KSET0 : avx512_mask_setop_w<immAllZerosV>;
defm KSET1 : avx512_mask_setop_w<immAllOnesV>;
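// Note: these are pseudo instructions; the defms above yield KSET0B/KSET0W and
// KSET1B/KSET1W, and the promotion patterns below use the W forms for the
// narrower mask types.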
// With AVX-512 only, 8-bit mask is promoted to 16-bit mask.
let Predicates = [HasAVX512] in {
  def : Pat<(v8i1 immAllZerosV), (COPY_TO_REGCLASS (KSET0W), VK8)>;
  def : Pat<(v8i1 immAllOnesV),  (COPY_TO_REGCLASS (KSET1W), VK8)>;
  def : Pat<(i1 0), (COPY_TO_REGCLASS (KSET0W), VK1)>;
  def : Pat<(i1 1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
  def : Pat<(i1 -1), (COPY_TO_REGCLASS (KSET1W), VK1)>;
}

def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 0))),
          (v8i1 (COPY_TO_REGCLASS VK16:$src, VK8))>;

def : Pat<(v16i1 (insert_subvector undef, (v8i1 VK8:$src), (iPTR 0))),
          (v16i1 (COPY_TO_REGCLASS VK8:$src, VK16))>;

def : Pat<(v8i1 (extract_subvector (v16i1 VK16:$src), (iPTR 8))),
          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri VK16:$src, (i8 8)), VK8))>;

let Predicates = [HasVLX] in {
  def : Pat<(v8i1 (insert_subvector undef, (v4i1 VK4:$src), (iPTR 0))),
            (v8i1 (COPY_TO_REGCLASS VK4:$src, VK8))>;
  def : Pat<(v8i1 (insert_subvector undef, (v2i1 VK2:$src), (iPTR 0))),
            (v8i1 (COPY_TO_REGCLASS VK2:$src, VK8))>;
  def : Pat<(v4i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
            (v4i1 (COPY_TO_REGCLASS VK8:$src, VK4))>;
  def : Pat<(v2i1 (extract_subvector (v8i1 VK8:$src), (iPTR 0))),
            (v2i1 (COPY_TO_REGCLASS VK8:$src, VK2))>;
}

def : Pat<(v8i1 (X86vshli VK8:$src, (i8 imm:$imm))),
          (v8i1 (COPY_TO_REGCLASS (KSHIFTLWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;

def : Pat<(v8i1 (X86vsrli VK8:$src, (i8 imm:$imm))),
          (v8i1 (COPY_TO_REGCLASS (KSHIFTRWri (COPY_TO_REGCLASS VK8:$src, VK16), (I8Imm $imm)), VK8))>;
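// Note: there is no 8-bit kshift instantiated above, so shifts of v8i1 are
// performed by widening the mask to VK16, using KSHIFTLWri/KSHIFTRWri, and
// copying the result back to VK8.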
//===----------------------------------------------------------------------===//
// AVX-512 - Aligned and unaligned load and store
//
multiclass avx512_load<bits<8> opc, string OpcodeStr, PatFrag ld_frag,
                       RegisterClass KRC, RegisterClass RC,
                       ValueType vt, ValueType zvt, X86MemOperand memop,
                       Domain d, bit IsReMaterializable = 1> {
  let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [],
                    d>, EVEX;
  def rrkz : AVX512PI<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
                      !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
                                 "${dst} {${mask}} {z}, $src}"), [], d>, EVEX, EVEX_KZ;
  }
  let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable,
      SchedRW = [WriteLoad] in
  def rm : AVX512PI<opc, MRMSrcMem, (outs RC:$dst), (ins memop:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(set RC:$dst, (vt (bitconvert (ld_frag addr:$src))))],
                    d>, EVEX;

  let AddedComplexity = 20 in {
  let Constraints = "$src0 = $dst", hasSideEffects = 0 in {
  let hasSideEffects = 0 in
    def rrk : AVX512PI<opc, MRMSrcReg, (outs RC:$dst),
                       (ins RC:$src0, KRC:$mask, RC:$src1),
                       !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
                                  "${dst} {${mask}}, $src1}"),
                       [(set RC:$dst, (vt (vselect KRC:$mask,
                                           (vt RC:$src1),
                                           (vt RC:$src0))))],
                       d>, EVEX, EVEX_K;
  let mayLoad = 1, SchedRW = [WriteLoad] in
    def rmk : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
                       (ins RC:$src0, KRC:$mask, memop:$src1),
                       !strconcat(OpcodeStr, "\t{$src1, ${dst} {${mask}}|",
                                  "${dst} {${mask}}, $src1}"),
                       [(set RC:$dst, (vt
                                       (vselect KRC:$mask,
                                        (vt (bitconvert (ld_frag addr:$src1))),
                                        (vt RC:$src0))))],
                       d>, EVEX, EVEX_K;
  }
  let mayLoad = 1, SchedRW = [WriteLoad] in
    def rmkz : AVX512PI<opc, MRMSrcMem, (outs RC:$dst),
                        (ins KRC:$mask, memop:$src),
                        !strconcat(OpcodeStr, "\t{$src, ${dst} {${mask}} {z}|",
                                   "${dst} {${mask}} {z}, $src}"),
                        [(set RC:$dst, (vt
                                        (vselect KRC:$mask,
                                         (vt (bitconvert (ld_frag addr:$src))),
                                         (vt (bitconvert (zvt immAllZerosV))))))],
                        d>, EVEX, EVEX_KZ;
  }
}
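// Note on naming inside avx512_load: rr/rm are the unmasked register and
// memory forms, a trailing "k" marks merge-masking (with a $src0 pass-through
// operand), and "kz" marks zero-masking.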
multiclass avx512_load_vl<bits<8> opc, string OpcodeStr, string ld_pat,
                          string elty, string elsz, string vsz512,
                          string vsz256, string vsz128, Domain d,
                          Predicate prd, bit IsReMaterializable = 1> {
  let Predicates = [prd] in
  defm Z : avx512_load<opc, OpcodeStr,
                       !cast<PatFrag>(ld_pat##"v"##vsz512##elty##elsz),
                       !cast<RegisterClass>("VK"##vsz512##"WM"), VR512,
                       !cast<ValueType>("v"##vsz512##elty##elsz), v16i32,
                       !cast<X86MemOperand>(elty##"512mem"), d,
                       IsReMaterializable>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_load<opc, OpcodeStr,
                            !cast<PatFrag>(ld_pat##!if(!eq(elty,"f"),
                                           "v"##vsz256##elty##elsz, "v4i64")),
                            !cast<RegisterClass>("VK"##vsz256##"WM"), VR256X,
                            !cast<ValueType>("v"##vsz256##elty##elsz), v8i32,
                            !cast<X86MemOperand>(elty##"256mem"), d,
                            IsReMaterializable>, EVEX_V256;

    defm Z128 : avx512_load<opc, OpcodeStr,
                            !cast<PatFrag>(ld_pat##!if(!eq(elty,"f"),
                                           "v"##vsz128##elty##elsz, "v2i64")),
                            !cast<RegisterClass>("VK"##vsz128##"WM"), VR128X,
                            !cast<ValueType>("v"##vsz128##elty##elsz), v4i32,
                            !cast<X86MemOperand>(elty##"128mem"), d,
                            IsReMaterializable>, EVEX_V128;
  }
}
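// Note: the _vl wrapper builds everything from strings. For "vmovups" with
// elty="f", elsz="32" and vsz512="16" it concatenates "load"##"v16f32" and
// casts it to a PatFrag, picks VK16WM as the write-mask class, and so on; the
// resulting 512/256/128-bit families get Z, Z256 and Z128 name suffixes (e.g.
// VMOVUPSZrmkz, which the patterns below rely on).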
multiclass avx512_store<bits<8> opc, string OpcodeStr, PatFrag st_frag,
                        ValueType OpVT, RegisterClass KRC, RegisterClass RC,
                        X86MemOperand memop, Domain d> {
  let isAsmParserOnly = 1, hasSideEffects = 0 in {
  def rr_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst), (ins RC:$src),
                        !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [], d>,
                        EVEX;
  let Constraints = "$src1 = $dst" in
  def rrk_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
                         (ins RC:$src1, KRC:$mask, RC:$src2),
                         !strconcat(OpcodeStr,
                         "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"), [], d>,
                         EVEX, EVEX_K;
  def rrkz_alt : AVX512PI<opc, MRMDestReg, (outs RC:$dst),
                          (ins KRC:$mask, RC:$src),
                          !strconcat(OpcodeStr,
                          "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
                          [], d>, EVEX, EVEX_KZ;
  }
  let mayStore = 1 in {
  def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(st_frag (OpVT RC:$src), addr:$dst)], d>, EVEX;
  def mrk : AVX512PI<opc, MRMDestMem, (outs),
                     (ins memop:$dst, KRC:$mask, RC:$src),
                     !strconcat(OpcodeStr,
                     "\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
                     [], d>, EVEX, EVEX_K;
  }
}
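// Note: the *_alt defs above are the register-to-register forms with the
// MRMDestReg (store-style) encoding; they are marked isAsmParserOnly, so they
// exist for the assembler and carry no selection patterns of their own.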
multiclass avx512_store_vl<bits<8> opc, string OpcodeStr, string st_pat,
                           string st_suff_512, string st_suff_256,
                           string st_suff_128, string elty, string elsz,
                           string vsz512, string vsz256, string vsz128,
                           Domain d, Predicate prd> {
  let Predicates = [prd] in
  defm Z : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_512),
                        !cast<ValueType>("v"##vsz512##elty##elsz),
                        !cast<RegisterClass>("VK"##vsz512##"WM"), VR512,
                        !cast<X86MemOperand>(elty##"512mem"), d>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_256),
                             !cast<ValueType>("v"##vsz256##elty##elsz),
                             !cast<RegisterClass>("VK"##vsz256##"WM"), VR256X,
                             !cast<X86MemOperand>(elty##"256mem"), d>, EVEX_V256;

    defm Z128 : avx512_store<opc, OpcodeStr, !cast<PatFrag>(st_pat##st_suff_128),
                             !cast<ValueType>("v"##vsz128##elty##elsz),
                             !cast<RegisterClass>("VK"##vsz128##"WM"), VR128X,
                             !cast<X86MemOperand>(elty##"128mem"), d>, EVEX_V128;
  }
}
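// Note: unlike the load side, the store PatFrag is selected by pasting an
// explicit per-width suffix onto st_pat (st_pat##st_suff_512 and so on), which
// is why the aligned instantiations below pass "512"/"256"/"" and the
// unaligned ones pass empty suffixes.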
defm VMOVAPS : avx512_load_vl<0x28, "vmovaps", "alignedload", "f", "32",
                              "16", "8", "4", SSEPackedSingle, HasAVX512>,
               avx512_store_vl<0x29, "vmovaps", "alignedstore",
                               "512", "256", "", "f", "32", "16", "8", "4",
                               SSEPackedSingle, HasAVX512>,
               PS, EVEX_CD8<32, CD8VF>;

defm VMOVAPD : avx512_load_vl<0x28, "vmovapd", "alignedload", "f", "64",
                              "8", "4", "2", SSEPackedDouble, HasAVX512>,
               avx512_store_vl<0x29, "vmovapd", "alignedstore",
                               "512", "256", "", "f", "64", "8", "4", "2",
                               SSEPackedDouble, HasAVX512>,
               PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VMOVUPS : avx512_load_vl<0x10, "vmovups", "load", "f", "32",
                              "16", "8", "4", SSEPackedSingle, HasAVX512>,
               avx512_store_vl<0x11, "vmovups", "store", "", "", "", "f", "32",
                               "16", "8", "4", SSEPackedSingle, HasAVX512>,
               PS, EVEX_CD8<32, CD8VF>;

defm VMOVUPD : avx512_load_vl<0x10, "vmovupd", "load", "f", "64",
                              "8", "4", "2", SSEPackedDouble, HasAVX512, 0>,
               avx512_store_vl<0x11, "vmovupd", "store", "", "", "", "f", "64",
                               "8", "4", "2", SSEPackedDouble, HasAVX512>,
               PD, VEX_W, EVEX_CD8<64, CD8VF>;
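// Note: the trailing 0 on the VMOVUPD load side is the IsReMaterializable bit
// of avx512_load_vl, so only that family is marked non-rematerializable here.
// The intrinsic and masked_load/masked_store patterns that follow select the
// 512-bit Z variants generated above (VMOVUPSZrmkz, VMOVUPDZmrk, etc.).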
def: Pat<(v8f64 (int_x86_avx512_mask_loadu_pd_512 addr:$ptr,
                 (bc_v8f64 (v16i32 immAllZerosV)), GR8:$mask)),
         (VMOVUPDZrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;

def: Pat<(v16f32 (int_x86_avx512_mask_loadu_ps_512 addr:$ptr,
                  (bc_v16f32 (v16i32 immAllZerosV)), GR16:$mask)),
         (VMOVUPSZrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;

def: Pat<(int_x86_avx512_mask_storeu_ps_512 addr:$ptr, (v16f32 VR512:$src),
          GR16:$mask),
         (VMOVUPSZmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
            VR512:$src)>;
def: Pat<(int_x86_avx512_mask_storeu_pd_512 addr:$ptr, (v8f64 VR512:$src),
          GR8:$mask),
         (VMOVUPDZmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
            VR512:$src)>;
def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8f32 VR256:$src)),
         (VMOVUPSZmrk addr:$ptr,
         (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)),
         (INSERT_SUBREG (v16f32 (IMPLICIT_DEF)), VR256:$src, sub_ymm))>;

def: Pat<(v8f32 (masked_load addr:$ptr, VK8WM:$mask, undef)),
         (v8f32 (EXTRACT_SUBREG (v16f32 (VMOVUPSZrmkz
          (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>;

def: Pat<(masked_store addr:$ptr, VK16WM:$mask, (v16f32 VR512:$src)),
         (VMOVUPSZmrk addr:$ptr, VK16WM:$mask, VR512:$src)>;

def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8f64 VR512:$src)),
         (VMOVUPDZmrk addr:$ptr, VK8WM:$mask, VR512:$src)>;

def: Pat<(v16f32 (masked_load addr:$ptr, VK16WM:$mask, undef)),
         (VMOVUPSZrmkz VK16WM:$mask, addr:$ptr)>;

def: Pat<(v16f32 (masked_load addr:$ptr, VK16WM:$mask,
                  (bc_v16f32 (v16i32 immAllZerosV)))),
         (VMOVUPSZrmkz VK16WM:$mask, addr:$ptr)>;

def: Pat<(v16f32 (masked_load addr:$ptr, VK16WM:$mask, (v16f32 VR512:$src0))),
         (VMOVUPSZrmk VR512:$src0, VK16WM:$mask, addr:$ptr)>;

def: Pat<(v8f64 (masked_load addr:$ptr, VK8WM:$mask, undef)),
         (VMOVUPDZrmkz VK8WM:$mask, addr:$ptr)>;

def: Pat<(v8f64 (masked_load addr:$ptr, VK8WM:$mask,
                 (bc_v8f64 (v16i32 immAllZerosV)))),
         (VMOVUPDZrmkz VK8WM:$mask, addr:$ptr)>;

def: Pat<(v8f64 (masked_load addr:$ptr, VK8WM:$mask, (v8f64 VR512:$src0))),
         (VMOVUPDZrmk VR512:$src0, VK8WM:$mask, addr:$ptr)>;
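// Note: the v8f32 masked load/store patterns above have no 256-bit masked
// instruction to map to in this block, so they widen the operation: the VK8WM
// mask is copied to VK16WM and the data is inserted into / extracted from the
// low ymm of a 512-bit register around VMOVUPSZmrk / VMOVUPSZrmkz.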
defm VMOVDQA32 : avx512_load_vl<0x6F, "vmovdqa32", "alignedload", "i", "32",
                                "16", "8", "4", SSEPackedInt, HasAVX512>,
                 avx512_store_vl<0x7F, "vmovdqa32", "alignedstore",
                                 "512", "256", "", "i", "32", "16", "8", "4",
                                 SSEPackedInt, HasAVX512>,
                 PD, EVEX_CD8<32, CD8VF>;

defm VMOVDQA64 : avx512_load_vl<0x6F, "vmovdqa64", "alignedload", "i", "64",
                                "8", "4", "2", SSEPackedInt, HasAVX512>,
                 avx512_store_vl<0x7F, "vmovdqa64", "alignedstore",
                                 "512", "256", "", "i", "64", "8", "4", "2",
                                 SSEPackedInt, HasAVX512>,
                 PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VMOVDQU8 : avx512_load_vl<0x6F, "vmovdqu8", "load", "i", "8",
                               "64", "32", "16", SSEPackedInt, HasBWI>,
                avx512_store_vl<0x7F, "vmovdqu8", "store", "", "", "",
                                "i", "8", "64", "32", "16", SSEPackedInt,
                                HasBWI>, XD, EVEX_CD8<8, CD8VF>;

defm VMOVDQU16 : avx512_load_vl<0x6F, "vmovdqu16", "load", "i", "16",
                                "32", "16", "8", SSEPackedInt, HasBWI>,
                 avx512_store_vl<0x7F, "vmovdqu16", "store", "", "", "",
                                 "i", "16", "32", "16", "8", SSEPackedInt,
                                 HasBWI>, XD, VEX_W, EVEX_CD8<16, CD8VF>;

defm VMOVDQU32 : avx512_load_vl<0x6F, "vmovdqu32", "load", "i", "32",
                                "16", "8", "4", SSEPackedInt, HasAVX512>,
                 avx512_store_vl<0x7F, "vmovdqu32", "store", "", "", "",
                                 "i", "32", "16", "8", "4", SSEPackedInt,
                                 HasAVX512>, XS, EVEX_CD8<32, CD8VF>;

defm VMOVDQU64 : avx512_load_vl<0x6F, "vmovdqu64", "load", "i", "64",
                                "8", "4", "2", SSEPackedInt, HasAVX512>,
                 avx512_store_vl<0x7F, "vmovdqu64", "store", "", "", "",
                                 "i", "64", "8", "4", "2", SSEPackedInt,
                                 HasAVX512>, XS, VEX_W, EVEX_CD8<64, CD8VF>;
def: Pat<(v16i32 (int_x86_avx512_mask_loadu_d_512 addr:$ptr,
                  (v16i32 immAllZerosV), GR16:$mask)),
         (VMOVDQU32Zrmkz (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), addr:$ptr)>;

def: Pat<(v8i64 (int_x86_avx512_mask_loadu_q_512 addr:$ptr,
                 (bc_v8i64 (v16i32 immAllZerosV)), GR8:$mask)),
         (VMOVDQU64Zrmkz (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), addr:$ptr)>;

def: Pat<(int_x86_avx512_mask_storeu_d_512 addr:$ptr, (v16i32 VR512:$src),
          GR16:$mask),
         (VMOVDQU32Zmrk addr:$ptr, (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)),
            VR512:$src)>;

def: Pat<(int_x86_avx512_mask_storeu_q_512 addr:$ptr, (v8i64 VR512:$src),
          GR8:$mask),
         (VMOVDQU64Zmrk addr:$ptr, (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)),
            VR512:$src)>;
let AddedComplexity = 20 in {
def : Pat<(v8i64 (vselect VK8WM:$mask, (v8i64 VR512:$src),
                          (bc_v8i64 (v16i32 immAllZerosV)))),
                  (VMOVDQU64Zrrkz VK8WM:$mask, VR512:$src)>;

def : Pat<(v8i64 (vselect VK8WM:$mask, (bc_v8i64 (v16i32 immAllZerosV)),
                          (v8i64 VR512:$src))),
   (VMOVDQU64Zrrkz (COPY_TO_REGCLASS (KNOTWrr (COPY_TO_REGCLASS VK8:$mask, VK16)),
                                     VK8), VR512:$src)>;

def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 VR512:$src),
                           (v16i32 immAllZerosV))),
                   (VMOVDQU32Zrrkz VK16WM:$mask, VR512:$src)>;

def : Pat<(v16i32 (vselect VK16WM:$mask, (v16i32 immAllZerosV),
                           (v16i32 VR512:$src))),
                   (VMOVDQU32Zrrkz (KNOTWrr VK16WM:$mask), VR512:$src)>;
}
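// Note: a vselect whose false operand is all-zeros is exactly the semantics of
// the zero-masking register move, so it maps onto the Zrrkz forms above; when
// the operands are swapped, the mask is inverted first with KNOTWrr.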
def: Pat<(v16i32 (masked_load addr:$ptr, VK16WM:$mask, (v16i32 immAllZerosV))),
         (VMOVDQU32Zrmkz VK16WM:$mask, addr:$ptr)>;

def: Pat<(v16i32 (masked_load addr:$ptr, VK16WM:$mask, undef)),
         (VMOVDQU32Zrmkz VK16WM:$mask, addr:$ptr)>;

def: Pat<(v16i32 (masked_load addr:$ptr, VK16WM:$mask, (v16i32 VR512:$src0))),
         (VMOVDQU32Zrmk VR512:$src0, VK16WM:$mask, addr:$ptr)>;

def: Pat<(v8i64 (masked_load addr:$ptr, VK8WM:$mask,
                 (bc_v8i64 (v16i32 immAllZerosV)))),
         (VMOVDQU64Zrmkz VK8WM:$mask, addr:$ptr)>;

def: Pat<(v8i64 (masked_load addr:$ptr, VK8WM:$mask, undef)),
         (VMOVDQU64Zrmkz VK8WM:$mask, addr:$ptr)>;

def: Pat<(v8i64 (masked_load addr:$ptr, VK8WM:$mask, (v8i64 VR512:$src0))),
         (VMOVDQU64Zrmk VR512:$src0, VK8WM:$mask, addr:$ptr)>;

def: Pat<(masked_store addr:$ptr, VK16WM:$mask, (v16i32 VR512:$src)),
         (VMOVDQU32Zmrk addr:$ptr, VK16WM:$mask, VR512:$src)>;

def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8i64 VR512:$src)),
         (VMOVDQU64Zmrk addr:$ptr, VK8WM:$mask, VR512:$src)>;

// SKX replacement
def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8i32 VR256:$src)),
         (VMOVDQU32Z256mrk addr:$ptr, VK8WM:$mask, VR256:$src)>;

// KNL replacement
def: Pat<(masked_store addr:$ptr, VK8WM:$mask, (v8i32 VR256:$src)),
         (VMOVDQU32Zmrk addr:$ptr,
         (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)),
         (INSERT_SUBREG (v16i32 (IMPLICIT_DEF)), VR256:$src, sub_ymm))>;

def: Pat<(v8i32 (masked_load addr:$ptr, VK8WM:$mask, undef)),
         (v8i32 (EXTRACT_SUBREG (v16i32 (VMOVDQU32Zrmkz
          (v16i1 (COPY_TO_REGCLASS VK8WM:$mask, VK16WM)), addr:$ptr)), sub_ymm))>;
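// Note: the "SKX replacement" and "KNL replacement" patterns above both match
// a v8i32 masked_store. The first uses the 256-bit VMOVDQU32Z256mrk, which is
// only available with VLX; the second widens the store to 512 bits so that
// targets without VLX (KNL) can still use the plain VMOVDQU32Zmrk.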
// Move Int Doubleword to Packed Double Int
//
def VMOVDI2PDIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR32:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set VR128X:$dst,
                        (v4i32 (scalar_to_vector GR32:$src)))], IIC_SSE_MOVDQ>,
                      EVEX, VEX_LIG;
def VMOVDI2PDIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst), (ins i32mem:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set VR128X:$dst,
                        (v4i32 (scalar_to_vector (loadi32 addr:$src))))],
                      IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
def VMOV64toPQIZrr : AVX512BI<0x6E, MRMSrcReg, (outs VR128X:$dst), (ins GR64:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set VR128X:$dst,
                        (v2i64 (scalar_to_vector GR64:$src)))],
                      IIC_SSE_MOVDQ>, EVEX, VEX_W, VEX_LIG;
let isCodeGenOnly = 1 in {
def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set FR64:$dst, (bitconvert GR64:$src))],
                      IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (bitconvert FR64:$src))],
                      IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteMove]>;
}
def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(store (i64 (bitconvert FR64:$src)), addr:$dst)],
                      IIC_SSE_MOVDQ>, EVEX, VEX_W, Sched<[WriteStore]>,
                      EVEX_CD8<64, CD8VT1>;
// Move Int Doubleword to Single Scalar
//
let isCodeGenOnly = 1 in {
def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set FR32X:$dst, (bitconvert GR32:$src))],
                      IIC_SSE_MOVDQ>, EVEX, VEX_LIG;

def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))],
                      IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
}
// Move doubleword from xmm register to r/m32
//
def VMOVPDI2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128X:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (vector_extract (v4i32 VR128X:$src),
                                        (iPTR 0)))], IIC_SSE_MOVD_ToGP>,
                      EVEX, VEX_LIG;
def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
                      (ins i32mem:$dst, VR128X:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (vector_extract (v4i32 VR128X:$src),
                                    (iPTR 0))), addr:$dst)], IIC_SSE_MOVDQ>,
                      EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
// Move quadword from xmm1 register to r/m64
//
def VMOVPQIto64Zrr : I<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128X:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set GR64:$dst, (extractelt (v2i64 VR128X:$src),
                                                   (iPTR 0)))],
                      IIC_SSE_MOVD_ToGP>, PD, EVEX, VEX_LIG, VEX_W,
                      Requires<[HasAVX512, In64BitMode]>;

def VMOVPQIto64Zmr : I<0xD6, MRMDestMem, (outs),
                      (ins i64mem:$dst, VR128X:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(store (extractelt (v2i64 VR128X:$src), (iPTR 0)),
                              addr:$dst)], IIC_SSE_MOVDQ>,
                      EVEX, PD, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>,
                      Sched<[WriteStore]>, Requires<[HasAVX512, In64BitMode]>;
// Move Scalar Single to Double Int
//
let isCodeGenOnly = 1 in {
def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
                      (ins FR32X:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(set GR32:$dst, (bitconvert FR32X:$src))],
                      IIC_SSE_MOVD_ToGP>, EVEX, VEX_LIG;
def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
                      (ins i32mem:$dst, FR32X:$src),
                      "vmovd\t{$src, $dst|$dst, $src}",
                      [(store (i32 (bitconvert FR32X:$src)), addr:$dst)],
                      IIC_SSE_MOVDQ>, EVEX, VEX_LIG, EVEX_CD8<32, CD8VT1>;
}
// Move Quadword Int to Packed Quadword Int
//
def VMOVQI2PQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
                      (ins i64mem:$src),
                      "vmovq\t{$src, $dst|$dst, $src}",
                      [(set VR128X:$dst,
                        (v2i64 (scalar_to_vector (loadi64 addr:$src))))]>,
                      EVEX, VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
//===----------------------------------------------------------------------===//
// AVX-512 MOVSS, MOVSD
//===----------------------------------------------------------------------===//

multiclass avx512_move_scalar <string asm, RegisterClass RC,
                               SDNode OpNode, ValueType vt,
                               X86MemOperand x86memop, PatFrag mem_pat> {
  let hasSideEffects = 0 in {
  def rr : SI<0x10, MRMSrcReg, (outs VR128X:$dst), (ins VR128X:$src1, RC:$src2),
              !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              [(set VR128X:$dst, (vt (OpNode VR128X:$src1,
                                             (scalar_to_vector RC:$src2))))],
              IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG;
  let Constraints = "$src1 = $dst" in
  def rrk : SI<0x10, MRMSrcReg, (outs VR128X:$dst),
               (ins VR128X:$src1, VK1WM:$mask, RC:$src2, RC:$src3),
               !strconcat(asm,
                 "\t{$src3, $src2, $dst {${mask}}|$dst {${mask}}, $src2, $src3}"),
               [], IIC_SSE_MOV_S_RR>, EVEX_4V, VEX_LIG, EVEX_K;
  def rm : SI<0x10, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
              !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
              [(set RC:$dst, (mem_pat addr:$src))], IIC_SSE_MOV_S_RM>,
              EVEX, VEX_LIG;
  let mayStore = 1 in {
  def mr: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, RC:$src),
             !strconcat(asm, "\t{$src, $dst|$dst, $src}"),
             [(store RC:$src, addr:$dst)], IIC_SSE_MOV_S_MR>,
             EVEX, VEX_LIG;
  def mrk: SI<0x11, MRMDestMem, (outs), (ins x86memop:$dst, VK1WM:$mask, RC:$src),
              !strconcat(asm, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
              [], IIC_SSE_MOV_S_MR>,
              EVEX, VEX_LIG, EVEX_K;
  } // mayStore
  } //hasSideEffects = 0
}
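// Note: the masked forms above (rrk and mrk) take a VK1WM operand, i.e. a
// single-bit write mask; they carry no selection patterns here and are matched
// explicitly by the X86select and mask_store_ss patterns that follow.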
let ExeDomain = SSEPackedSingle in
defm VMOVSSZ : avx512_move_scalar<"movss", FR32X, X86Movss, v4f32, f32mem,
                                  loadf32>, XS, EVEX_CD8<32, CD8VT1>;

let ExeDomain = SSEPackedDouble in
defm VMOVSDZ : avx512_move_scalar<"movsd", FR64X, X86Movsd, v2f64, f64mem,
                                  loadf64>, XD, VEX_W, EVEX_CD8<64, CD8VT1>;

def : Pat<(f32 (X86select VK1WM:$mask, (f32 FR32X:$src1), (f32 FR32X:$src2))),
          (COPY_TO_REGCLASS (VMOVSSZrrk (COPY_TO_REGCLASS FR32X:$src2, VR128X),
           VK1WM:$mask, (f32 (IMPLICIT_DEF)), FR32X:$src1), FR32X)>;

def : Pat<(f64 (X86select VK1WM:$mask, (f64 FR64X:$src1), (f64 FR64X:$src2))),
          (COPY_TO_REGCLASS (VMOVSDZrrk (COPY_TO_REGCLASS FR64X:$src2, VR128X),
           VK1WM:$mask, (f64 (IMPLICIT_DEF)), FR64X:$src1), FR64X)>;

def : Pat<(int_x86_avx512_mask_store_ss addr:$dst, VR128X:$src, GR8:$mask),
          (VMOVSSZmrk addr:$dst, (i1 (COPY_TO_REGCLASS GR8:$mask, VK1WM)),
           (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
// For the disassembler
let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0 in {
  def VMOVSSZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
                         (ins VR128X:$src1, FR32X:$src2),
                         "movss\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                         IIC_SSE_MOV_S_RR>,
                         XS, EVEX_4V, VEX_LIG;
  def VMOVSDZrr_REV : SI<0x11, MRMDestReg, (outs VR128X:$dst),
                         (ins VR128X:$src1, FR64X:$src2),
                         "movsd\t{$src2, $src1, $dst|$dst, $src1, $src2}", [],
                         IIC_SSE_MOV_S_RR>,
                         XD, EVEX_4V, VEX_LIG, VEX_W;
}
let Predicates = [HasAVX512] in {
  let AddedComplexity = 15 in {
  // Move scalar to XMM zero-extended, zeroing a VR128X then do a
  // MOVS{S,D} to the lower bits.
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
            (VMOVSSZrr (v4f32 (V_SET0)), FR32X:$src)>;
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
            (VMOVSSZrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
            (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
            (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;

  // Move low f32 and clear high bits.
  def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSZrr (v4f32 (V_SET0)),
              (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)), sub_xmm)>;
  def : Pat<(v8i32 (X86vzmovl (v8i32 VR256X:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSZrr (v4i32 (V_SET0)),
              (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
  }
  let AddedComplexity = 20 in {
  // MOVSSrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
  def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;
  def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSSZrm addr:$src), VR128X)>;

  // MOVSDrm zeros the high parts of the register; represent this
  // with SUBREG_TO_REG. The AVX versions also write: DST[255:128] <- 0
  def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
  def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
  def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
  def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;
  def : Pat<(v2f64 (X86vzload addr:$src)),
            (COPY_TO_REGCLASS (VMOVSDZrm addr:$src), VR128X)>;

  // Represent the same patterns above but in the form they appear for
  // 256-bit types
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                   (v4i32 (scalar_to_vector (loadi32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrm addr:$src), sub_xmm)>;
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector (loadf32 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSSZrm addr:$src), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector (loadf64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVSDZrm addr:$src), sub_xmm)>;
  }
  def : Pat<(v8f32 (X86vzmovl (insert_subvector undef,
                   (v4f32 (scalar_to_vector FR32X:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (v4f32 (VMOVSSZrr (v4f32 (V_SET0)),
                                            FR32X:$src)), sub_xmm)>;
  def : Pat<(v4f64 (X86vzmovl (insert_subvector undef,
                   (v2f64 (scalar_to_vector FR64X:$src)), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (v2f64 (VMOVSDZrr (v2f64 (V_SET0)),
                                            FR64X:$src)), sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                   (v2i64 (scalar_to_vector (loadi64 addr:$src))), (iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDZrr (v2f64 (V_SET0)),
              (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;

  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (V_SET0)),
             (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;

  // Extract and store.
  def : Pat<(store (f32 (vector_extract (v4f32 VR128X:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
  def : Pat<(store (f64 (vector_extract (v2f64 VR128X:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSDZmr addr:$dst, (COPY_TO_REGCLASS (v2f64 VR128X:$src), FR64X))>;

  // Shuffle with VMOVSS
  def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
            (VMOVSSZrr (v4i32 VR128X:$src1),
                       (COPY_TO_REGCLASS (v4i32 VR128X:$src2), FR32X))>;
  def : Pat<(v4f32 (X86Movss VR128X:$src1, VR128X:$src2)),
            (VMOVSSZrr (v4f32 VR128X:$src1),
                       (COPY_TO_REGCLASS (v4f32 VR128X:$src2), FR32X))>;

  // 256-bit variants
  def : Pat<(v8i32 (X86Movss VR256X:$src1, VR256X:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSZrr (EXTRACT_SUBREG (v8i32 VR256X:$src1), sub_xmm),
                         (EXTRACT_SUBREG (v8i32 VR256X:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v8f32 (X86Movss VR256X:$src1, VR256X:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSSZrr (EXTRACT_SUBREG (v8f32 VR256X:$src1), sub_xmm),
                         (EXTRACT_SUBREG (v8f32 VR256X:$src2), sub_xmm)),
              sub_xmm)>;

  // Shuffle with VMOVSD
  def : Pat<(v2i64 (X86Movsd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v2f64 (X86Movsd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v4f32 (X86Movsd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v4i32 (X86Movsd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;

  // 256-bit variants
  def : Pat<(v4i64 (X86Movsd VR256X:$src1, VR256X:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDZrr (EXTRACT_SUBREG (v4i64 VR256X:$src1), sub_xmm),
                         (EXTRACT_SUBREG (v4i64 VR256X:$src2), sub_xmm)),
              sub_xmm)>;
  def : Pat<(v4f64 (X86Movsd VR256X:$src1, VR256X:$src2)),
            (SUBREG_TO_REG (i32 0),
              (VMOVSDZrr (EXTRACT_SUBREG (v4f64 VR256X:$src1), sub_xmm),
                         (EXTRACT_SUBREG (v4f64 VR256X:$src2), sub_xmm)),
              sub_xmm)>;

  def : Pat<(v2f64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v2i64 (X86Movlpd VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v4f32 (X86Movlps VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
  def : Pat<(v4i32 (X86Movlps VR128X:$src1, VR128X:$src2)),
            (VMOVSDZrr VR128X:$src1, (COPY_TO_REGCLASS VR128X:$src2, FR64X))>;
}
let AddedComplexity = 15 in
def VMOVZPQILo2PQIZrr : AVX512XSI<0x7E, MRMSrcReg, (outs VR128X:$dst),
                                  (ins VR128X:$src),
                                  "vmovq\t{$src, $dst|$dst, $src}",
                                  [(set VR128X:$dst, (v2i64 (X86vzmovl
                                                     (v2i64 VR128X:$src))))],
                                  IIC_SSE_MOVQ_RR>, EVEX, VEX_W;

let AddedComplexity = 20 in
def VMOVZPQILo2PQIZrm : AVX512XSI<0x7E, MRMSrcMem, (outs VR128X:$dst),
                                  (ins i128mem:$src),
                                  "vmovq\t{$src, $dst|$dst, $src}",
                                  [(set VR128X:$dst, (v2i64 (X86vzmovl
                                                     (loadv2i64 addr:$src))))],
                                  IIC_SSE_MOVDQ>, EVEX, VEX_W,
                                  EVEX_CD8<8, CD8VT8>;
let Predicates = [HasAVX512] in {
  // AVX 128-bit movd/movq instruction write zeros in the high 128-bit part.
  let AddedComplexity = 20 in {
    def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector (loadi32 addr:$src))))),
              (VMOVDI2PDIZrm addr:$src)>;
    def : Pat<(v2i64 (X86vzmovl (v2i64 (scalar_to_vector GR64:$src)))),
              (VMOV64toPQIZrr GR64:$src)>;
    def : Pat<(v4i32 (X86vzmovl (v4i32 (scalar_to_vector GR32:$src)))),
              (VMOVDI2PDIZrr GR32:$src)>;

    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv4f32 addr:$src)))),
              (VMOVDI2PDIZrm addr:$src)>;
    def : Pat<(v4i32 (X86vzmovl (bc_v4i32 (loadv2i64 addr:$src)))),
              (VMOVDI2PDIZrm addr:$src)>;
    def : Pat<(v2i64 (X86vzmovl (loadv2i64 addr:$src))),
              (VMOVZPQILo2PQIZrm addr:$src)>;
    def : Pat<(v2f64 (X86vzmovl (v2f64 VR128X:$src))),
              (VMOVZPQILo2PQIZrr VR128X:$src)>;
    def : Pat<(v2i64 (X86vzload addr:$src)),
              (VMOVZPQILo2PQIZrm addr:$src)>;
  }

  // Use regular 128-bit instructions to match 256-bit scalar_to_vec+zext.
  def : Pat<(v8i32 (X86vzmovl (insert_subvector undef,
                               (v4i32 (scalar_to_vector GR32:$src)),(iPTR 0)))),
            (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src), sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (insert_subvector undef,
                               (v2i64 (scalar_to_vector GR64:$src)),(iPTR 0)))),
            (SUBREG_TO_REG (i64 0), (VMOV64toPQIZrr GR64:$src), sub_xmm)>;
}

def : Pat<(v16i32 (X86Vinsert (v16i32 immAllZerosV), GR32:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;

def : Pat<(v8i64 (X86Vinsert (bc_v8i64 (v16i32 immAllZerosV)), GR64:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;

def : Pat<(v16i32 (X86Vinsert undef, GR32:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOVDI2PDIZrr GR32:$src2), sub_xmm)>;

def : Pat<(v8i64 (X86Vinsert undef, GR64:$src2, (iPTR 0))),
          (SUBREG_TO_REG (i32 0), (VMOV64toPQIZrr GR64:$src2), sub_xmm)>;
//===----------------------------------------------------------------------===//
// AVX-512 - Non-temporals
//===----------------------------------------------------------------------===//
let SchedRW = [WriteLoad] in {
  def VMOVNTDQAZrm : AVX512PI<0x2A, MRMSrcMem, (outs VR512:$dst),
                              (ins i512mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}",
                              [(set VR512:$dst, (int_x86_avx512_movntdqa addr:$src))],
                              SSEPackedInt>, EVEX, T8PD, EVEX_V512,
                              EVEX_CD8<64, CD8VF>;

  let Predicates = [HasAVX512, HasVLX] in {
    def VMOVNTDQAZ256rm : AVX512PI<0x2A, MRMSrcMem, (outs VR256X:$dst),
                                   (ins i256mem:$src),
                                   "vmovntdqa\t{$src, $dst|$dst, $src}", [],
                                   SSEPackedInt>, EVEX, T8PD, EVEX_V256,
                                   EVEX_CD8<64, CD8VF>;

    def VMOVNTDQAZ128rm : AVX512PI<0x2A, MRMSrcMem, (outs VR128X:$dst),
                                   (ins i128mem:$src),
                                   "vmovntdqa\t{$src, $dst|$dst, $src}", [],
                                   SSEPackedInt>, EVEX, T8PD, EVEX_V128,
                                   EVEX_CD8<64, CD8VF>;
  }
}
multiclass avx512_movnt<bits<8> opc, string OpcodeStr, PatFrag st_frag,
                        ValueType OpVT, RegisterClass RC, X86MemOperand memop,
                        Domain d, InstrItinClass itin = IIC_SSE_MOVNT> {
  let SchedRW = [WriteStore], mayStore = 1,
      AddedComplexity = 400 in
  def mr : AVX512PI<opc, MRMDestMem, (outs), (ins memop:$dst, RC:$src),
                    !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
                    [(st_frag (OpVT RC:$src), addr:$dst)], d, itin>, EVEX;
}

multiclass avx512_movnt_vl<bits<8> opc, string OpcodeStr, PatFrag st_frag,
                           string elty, string elsz, string vsz512,
                           string vsz256, string vsz128, Domain d,
                           Predicate prd, InstrItinClass itin = IIC_SSE_MOVNT> {
  let Predicates = [prd] in
  defm Z : avx512_movnt<opc, OpcodeStr, st_frag,
                        !cast<ValueType>("v"##vsz512##elty##elsz), VR512,
                        !cast<X86MemOperand>(elty##"512mem"), d, itin>,
                        EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_movnt<opc, OpcodeStr, st_frag,
                             !cast<ValueType>("v"##vsz256##elty##elsz), VR256X,
                             !cast<X86MemOperand>(elty##"256mem"), d, itin>,
                             EVEX_V256;

    defm Z128 : avx512_movnt<opc, OpcodeStr, st_frag,
                             !cast<ValueType>("v"##vsz128##elty##elsz), VR128X,
                             !cast<X86MemOperand>(elty##"128mem"), d, itin>,
                             EVEX_V128;
  }
}

defm VMOVNTDQ : avx512_movnt_vl<0xE7, "vmovntdq", alignednontemporalstore,
                                "i", "64", "8", "4", "2", SSEPackedInt,
                                HasAVX512>, PD, EVEX_CD8<64, CD8VF>;

defm VMOVNTPD : avx512_movnt_vl<0x2B, "vmovntpd", alignednontemporalstore,
                                "f", "64", "8", "4", "2", SSEPackedDouble,
                                HasAVX512>, PD, VEX_W, EVEX_CD8<64, CD8VF>;

defm VMOVNTPS : avx512_movnt_vl<0x2B, "vmovntps", alignednontemporalstore,
                                "f", "32", "16", "8", "4", SSEPackedSingle,
                                HasAVX512>, PS, EVEX_CD8<32, CD8VF>;
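// Note: the non-temporal stores carry AddedComplexity = 400 so that, when an
// aligned non-temporal store fragment matches, these forms are preferred over
// the ordinary vmovaps/vmovdqa store patterns.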
//===----------------------------------------------------------------------===//
// AVX-512 - Integer arithmetic
//
multiclass avx512_binop_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
                           X86VectorVTInfo _, OpndItins itins,
                           bit IsCommutable = 0> {
  defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
                    "$src2, $src1", "$src1, $src2",
                    (_.VT (OpNode _.RC:$src1, _.RC:$src2)),
                    "", itins.rr, IsCommutable>,
                    AVX512BIBase, EVEX_4V;

  let mayLoad = 1 in
    defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
                    "$src2, $src1", "$src1, $src2",
                    (_.VT (OpNode _.RC:$src1,
                                  (bitconvert (_.LdFrag addr:$src2)))),
                    "", itins.rm>,
                    AVX512BIBase, EVEX_4V;
}
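// Note: AVX512_maskable (defined earlier in this file) wraps each description
// above so that every defm yields the unmasked form plus the {k} merge-masked
// and {k}{z} zero-masked variants, with the register class, memory operand,
// load fragment and mnemonic suffix all taken from the X86VectorVTInfo
// argument "_".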
multiclass avx512_binop_rmb<bits<8> opc, string OpcodeStr, SDNode OpNode,
                            X86VectorVTInfo _, OpndItins itins,
                            bit IsCommutable = 0> :
           avx512_binop_rm<opc, OpcodeStr, OpNode, _, itins, IsCommutable> {
  let mayLoad = 1 in
    defm rmb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
                    (ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
                    "${src2}"##_.BroadcastStr##", $src1",
                    "$src1, ${src2}"##_.BroadcastStr,
                    (_.VT (OpNode _.RC:$src1,
                                  (X86VBroadcast
                                      (_.ScalarLdFrag addr:$src2)))),
                    "", itins.rm>,
                    AVX512BIBase, EVEX_4V, EVEX_B;
}
multiclass avx512_binop_rm_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
                              AVX512VLVectorVTInfo VTInfo, OpndItins itins,
                              Predicate prd, bit IsCommutable = 0> {
  let Predicates = [prd] in
    defm Z : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
                             IsCommutable>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
                                IsCommutable>, EVEX_V256;
    defm Z128 : avx512_binop_rm<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
                                IsCommutable>, EVEX_V128;
  }
}
multiclass avx512_binop_rmb_vl<bits<8> opc, string OpcodeStr, SDNode OpNode,
                               AVX512VLVectorVTInfo VTInfo, OpndItins itins,
                               Predicate prd, bit IsCommutable = 0> {
  let Predicates = [prd] in
    defm Z : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info512, itins,
                              IsCommutable>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info256, itins,
                                 IsCommutable>, EVEX_V256;
    defm Z128 : avx512_binop_rmb<opc, OpcodeStr, OpNode, VTInfo.info128, itins,
                                 IsCommutable>, EVEX_V128;
  }
}
multiclass avx512_binop_rm_vl_q<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                OpndItins itins, Predicate prd,
                                bit IsCommutable = 0> {
  defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i64_info,
                                  itins, prd, IsCommutable>,
                                  VEX_W, EVEX_CD8<64, CD8VF>;
}

multiclass avx512_binop_rm_vl_d<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                OpndItins itins, Predicate prd,
                                bit IsCommutable = 0> {
  defm NAME : avx512_binop_rmb_vl<opc, OpcodeStr, OpNode, avx512vl_i32_info,
                                  itins, prd, IsCommutable>, EVEX_CD8<32, CD8VF>;
}

multiclass avx512_binop_rm_vl_w<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                OpndItins itins, Predicate prd,
                                bit IsCommutable = 0> {
  defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i16_info,
                                 itins, prd, IsCommutable>, EVEX_CD8<16, CD8VF>;
}

multiclass avx512_binop_rm_vl_b<bits<8> opc, string OpcodeStr, SDNode OpNode,
                                OpndItins itins, Predicate prd,
                                bit IsCommutable = 0> {
  defm NAME : avx512_binop_rm_vl<opc, OpcodeStr, OpNode, avx512vl_i8_info,
                                 itins, prd, IsCommutable>, EVEX_CD8<8, CD8VF>;
}
multiclass avx512_binop_rm_vl_dq<bits<8> opc_d, bits<8> opc_q, string OpcodeStr,
                                 SDNode OpNode, OpndItins itins, Predicate prd,
                                 bit IsCommutable = 0> {
  defm Q : avx512_binop_rm_vl_q<opc_q, OpcodeStr, OpNode, itins, prd,
                                IsCommutable>;

  defm D : avx512_binop_rm_vl_d<opc_d, OpcodeStr, OpNode, itins, prd,
                                IsCommutable>;
}

multiclass avx512_binop_rm_vl_bw<bits<8> opc_b, bits<8> opc_w, string OpcodeStr,
                                 SDNode OpNode, OpndItins itins, Predicate prd,
                                 bit IsCommutable = 0> {
  defm W : avx512_binop_rm_vl_w<opc_w, OpcodeStr, OpNode, itins, prd,
                                IsCommutable>;

  defm B : avx512_binop_rm_vl_b<opc_b, OpcodeStr, OpNode, itins, prd,
                                IsCommutable>;
}

multiclass avx512_binop_rm_vl_all<bits<8> opc_b, bits<8> opc_w,
                                  bits<8> opc_d, bits<8> opc_q,
                                  string OpcodeStr, SDNode OpNode,
                                  OpndItins itins, bit IsCommutable = 0> {
  defm NAME : avx512_binop_rm_vl_dq<opc_d, opc_q, OpcodeStr, OpNode,
                                    itins, HasAVX512, IsCommutable>,
              avx512_binop_rm_vl_bw<opc_b, opc_w, OpcodeStr, OpNode,
                                    itins, HasBWI, IsCommutable>;
}
multiclass avx512_binop_rm2<bits<8> opc, string OpcodeStr, ValueType DstVT,
                            ValueType SrcVT, RegisterClass KRC, RegisterClass RC,
                            PatFrag memop_frag, X86MemOperand x86memop,
                            PatFrag scalar_mfrag, X86MemOperand x86scalar_mop,
                            string BrdcstStr, OpndItins itins, bit IsCommutable = 0> {
  let isCommutable = IsCommutable in
  {
    def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
       (ins RC:$src1, RC:$src2),
       !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
       []>, EVEX_4V;
    def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
       (ins KRC:$mask, RC:$src1, RC:$src2),
       !strconcat(OpcodeStr,
          "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
       [], itins.rr>, EVEX_4V, EVEX_K;
    def rrkz : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
        (ins KRC:$mask, RC:$src1, RC:$src2),
        !strconcat(OpcodeStr, "\t{$src2, $src1, $dst {${mask}} {z}" ,
            "|$dst {${mask}} {z}, $src1, $src2}"),
        [], itins.rr>, EVEX_4V, EVEX_KZ;
  }
  let mayLoad = 1 in {
    def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
              (ins RC:$src1, x86memop:$src2),
              !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
              []>, EVEX_4V;
    def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
               (ins KRC:$mask, RC:$src1, x86memop:$src2),
               !strconcat(OpcodeStr,
                   "\t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
               [], itins.rm>, EVEX_4V, EVEX_K;
    def rmkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                (ins KRC:$mask, RC:$src1, x86memop:$src2),
                !strconcat(OpcodeStr,
                    "\t{$src2, $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, $src2}"),
                [], itins.rm>, EVEX_4V, EVEX_KZ;
    def rmb : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, x86scalar_mop:$src2),
               !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
                          ", $src1, $dst|$dst, $src1, ${src2}", BrdcstStr, "}"),
               [], itins.rm>, EVEX_4V, EVEX_B;
    def rmbk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
                !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
                           ", $src1, $dst {${mask}}|$dst {${mask}}, $src1, ${src2}",
                           BrdcstStr, "}"),
                [], itins.rm>, EVEX_4V, EVEX_B, EVEX_K;
    def rmbkz : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
                 (ins KRC:$mask, RC:$src1, x86scalar_mop:$src2),
                 !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
                            ", $src1, $dst {${mask}} {z}|$dst {${mask}} {z}, $src1, ${src2}",
                            BrdcstStr, "}"),
                 [], itins.rm>, EVEX_4V, EVEX_B, EVEX_KZ;
  }
}
defm VPADD : avx512_binop_rm_vl_all<0xFC, 0xFD, 0xFE, 0xD4, "vpadd", add,
|
|
|
|
SSE_INTALU_ITINS_P, 1>;
|
|
|
|
defm VPSUB : avx512_binop_rm_vl_all<0xF8, 0xF9, 0xFA, 0xFB, "vpsub", sub,
|
|
|
|
SSE_INTALU_ITINS_P, 0>;
|
|
|
|
defm VPMULLD : avx512_binop_rm_vl_d<0x40, "vpmull", mul,
|
|
|
|
SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
|
|
|
|
defm VPMULLW : avx512_binop_rm_vl_w<0xD5, "vpmull", mul,
|
|
|
|
SSE_INTALU_ITINS_P, HasBWI, 1>;
|
2014-10-14 23:13:56 +08:00
|
|
|
defm VPMULLQ : avx512_binop_rm_vl_q<0x40, "vpmull", mul,
|
|
|
|
SSE_INTALU_ITINS_P, HasDQI, 1>, T8PD;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-03-27 17:45:08 +08:00
|
|
|
defm VPMULDQZ : avx512_binop_rm2<0x28, "vpmuldq", v8i64, v16i32, VK8WM, VR512,
|
|
|
|
memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
|
|
|
|
SSE_INTALU_ITINS_P, 1>, T8PD, EVEX_V512,
|
|
|
|
EVEX_CD8<64, CD8VF>, VEX_W;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-03-27 17:45:08 +08:00
|
|
|
defm VPMULUDQZ : avx512_binop_rm2<0xF4, "vpmuludq", v8i64, v16i32, VK8WM, VR512,
|
|
|
|
memopv8i64, i512mem, loadi64, i64mem, "{1to8}",
|
|
|
|
SSE_INTMUL_ITINS_P, 1>, EVEX_V512, EVEX_CD8<64, CD8VF>, VEX_W;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
def : Pat<(v8i64 (X86pmuludq (v16i32 VR512:$src1), (v16i32 VR512:$src2))),
|
|
|
|
(VPMULUDQZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
|
2014-01-08 18:54:22 +08:00
|
|
|
def : Pat<(v8i64 (int_x86_avx512_mask_pmulu_dq_512 (v16i32 VR512:$src1),
|
|
|
|
(v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VPMULUDQZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
def : Pat<(v8i64 (int_x86_avx512_mask_pmul_dq_512 (v16i32 VR512:$src1),
|
|
|
|
(v16i32 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VPMULDQZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
|
2014-10-14 22:36:19 +08:00
|
|
|
defm VPMAXSB : avx512_binop_rm_vl_b<0x3C, "vpmaxs", X86smax,
|
|
|
|
SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
|
|
|
|
defm VPMAXSW : avx512_binop_rm_vl_w<0xEE, "vpmaxs", X86smax,
|
|
|
|
SSE_INTALU_ITINS_P, HasBWI, 1>;
|
|
|
|
defm VPMAXS : avx512_binop_rm_vl_dq<0x3D, 0x3D, "vpmaxs", X86smax,
|
|
|
|
SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
|
|
|
|
|
|
|
|
defm VPMAXUB : avx512_binop_rm_vl_b<0xDE, "vpmaxu", X86umax,
|
|
|
|
SSE_INTALU_ITINS_P, HasBWI, 1>;
|
|
|
|
defm VPMAXUW : avx512_binop_rm_vl_w<0x3E, "vpmaxu", X86umax,
|
|
|
|
SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
|
|
|
|
defm VPMAXU : avx512_binop_rm_vl_dq<0x3F, 0x3F, "vpmaxu", X86umax,
|
|
|
|
SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
|
|
|
|
|
|
|
|
defm VPMINSB : avx512_binop_rm_vl_b<0x38, "vpmins", X86smin,
|
|
|
|
SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
|
|
|
|
defm VPMINSW : avx512_binop_rm_vl_w<0xEA, "vpmins", X86smin,
|
|
|
|
SSE_INTALU_ITINS_P, HasBWI, 1>;
|
|
|
|
defm VPMINS : avx512_binop_rm_vl_dq<0x39, 0x39, "vpmins", X86smin,
|
|
|
|
SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
|
|
|
|
|
|
|
|
defm VPMINUB : avx512_binop_rm_vl_b<0xDA, "vpminu", X86umin,
|
|
|
|
SSE_INTALU_ITINS_P, HasBWI, 1>;
|
|
|
|
defm VPMINUW : avx512_binop_rm_vl_w<0x3A, "vpminu", X86umin,
|
|
|
|
SSE_INTALU_ITINS_P, HasBWI, 1>, T8PD;
|
|
|
|
defm VPMINU : avx512_binop_rm_vl_dq<0x3B, 0x3B, "vpminu", X86umin,
|
|
|
|
SSE_INTALU_ITINS_P, HasAVX512, 1>, T8PD;
|
2013-10-27 16:18:37 +08:00
|
|
|
|
2014-01-08 18:54:22 +08:00
|
|
|
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxs_d_512 (v16i32 VR512:$src1),
|
|
|
|
(v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
|
|
|
|
(VPMAXSDZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
def : Pat <(v16i32 (int_x86_avx512_mask_pmaxu_d_512 (v16i32 VR512:$src1),
|
|
|
|
(v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
|
|
|
|
(VPMAXUDZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxs_q_512 (v8i64 VR512:$src1),
|
|
|
|
(v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VPMAXSQZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
def : Pat <(v8i64 (int_x86_avx512_mask_pmaxu_q_512 (v8i64 VR512:$src1),
|
|
|
|
(v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VPMAXUQZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
def : Pat <(v16i32 (int_x86_avx512_mask_pmins_d_512 (v16i32 VR512:$src1),
|
|
|
|
(v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
|
|
|
|
(VPMINSDZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
def : Pat <(v16i32 (int_x86_avx512_mask_pminu_d_512 (v16i32 VR512:$src1),
|
|
|
|
(v16i32 VR512:$src2), (v16i32 immAllZerosV), (i16 -1))),
|
|
|
|
(VPMINUDZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
def : Pat <(v8i64 (int_x86_avx512_mask_pmins_q_512 (v8i64 VR512:$src1),
|
|
|
|
(v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VPMINSQZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
def : Pat <(v8i64 (int_x86_avx512_mask_pminu_q_512 (v8i64 VR512:$src1),
|
|
|
|
(v8i64 VR512:$src2), (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VPMINUQZrr VR512:$src1, VR512:$src2)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 - Unpack Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,
|
|
|
|
PatFrag mem_frag, RegisterClass RC,
|
|
|
|
X86MemOperand x86memop, string asm,
|
|
|
|
Domain d> {
|
|
|
|
def rr : AVX512PI<opc, MRMSrcReg,
|
|
|
|
(outs RC:$dst), (ins RC:$src1, RC:$src2),
|
|
|
|
asm, [(set RC:$dst,
|
|
|
|
(vt (OpNode RC:$src1, RC:$src2)))],
|
2013-10-02 14:39:07 +08:00
|
|
|
d>, EVEX_4V;
|
2013-09-17 15:34:34 +08:00
|
|
|
def rm : AVX512PI<opc, MRMSrcMem,
|
|
|
|
(outs RC:$dst), (ins RC:$src1, x86memop:$src2),
|
|
|
|
asm, [(set RC:$dst,
|
|
|
|
(vt (OpNode RC:$src1,
|
|
|
|
(bitconvert (mem_frag addr:$src2)))))],
|
2013-10-02 14:39:07 +08:00
|
|
|
d>, EVEX_4V;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
|
|
|
|
VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2014-02-18 08:21:49 +08:00
|
|
|
SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
|
|
|
|
VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2014-01-14 15:41:20 +08:00
|
|
|
SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
|
|
|
|
VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2014-02-18 08:21:49 +08:00
|
|
|
SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
|
|
|
|
VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2014-01-14 15:41:20 +08:00
|
|
|
SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
multiclass avx512_unpack_int<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
ValueType OpVT, RegisterClass RC, PatFrag memop_frag,
|
|
|
|
X86MemOperand x86memop> {
|
|
|
|
def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1), (OpVT RC:$src2))))],
|
|
|
|
IIC_SSE_UNPCK>, EVEX_4V;
|
|
|
|
def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, x86memop:$src2),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst, (OpVT (OpNode (OpVT RC:$src1),
|
|
|
|
(bitconvert (memop_frag addr:$src2)))))],
|
|
|
|
IIC_SSE_UNPCK>, EVEX_4V;
|
|
|
|
}
|
|
|
|
defm VPUNPCKLDQZ : avx512_unpack_int<0x62, "vpunpckldq", X86Unpckl, v16i32,
|
|
|
|
VR512, memopv16i32, i512mem>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPUNPCKLQDQZ : avx512_unpack_int<0x6C, "vpunpcklqdq", X86Unpckl, v8i64,
|
|
|
|
VR512, memopv8i64, i512mem>, EVEX_V512,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
defm VPUNPCKHDQZ : avx512_unpack_int<0x6A, "vpunpckhdq", X86Unpckh, v16i32,
|
|
|
|
VR512, memopv16i32, i512mem>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPUNPCKHQDQZ : avx512_unpack_int<0x6D, "vpunpckhqdq", X86Unpckh, v8i64,
|
|
|
|
VR512, memopv8i64, i512mem>, EVEX_V512,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 - PSHUFD
|
|
|
|
//
|
|
|
|
|
|
|
|
multiclass avx512_pshuf_imm<bits<8> opc, string OpcodeStr, RegisterClass RC,
|
|
|
|
SDNode OpNode, PatFrag mem_frag,
|
|
|
|
X86MemOperand x86memop, ValueType OpVT> {
|
|
|
|
def ri : AVX512Ii8<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, i8imm:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
|
|
|
(OpVT (OpNode RC:$src1, (i8 imm:$src2))))]>,
|
|
|
|
EVEX;
|
|
|
|
def mi : AVX512Ii8<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins x86memop:$src1, i8imm:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
|
|
|
(OpVT (OpNode (mem_frag addr:$src1),
|
|
|
|
(i8 imm:$src2))))]>, EVEX;
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VPSHUFDZ : avx512_pshuf_imm<0x70, "vpshufd", VR512, X86PShufd, memopv16i32,
|
2014-01-14 15:41:20 +08:00
|
|
|
i512mem, v16i32>, PD, EVEX_V512, EVEX_CD8<32, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 Logical Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-10-14 22:36:19 +08:00
|
|
|
defm VPAND : avx512_binop_rm_vl_dq<0xDB, 0xDB, "vpand", and,
|
|
|
|
SSE_INTALU_ITINS_P, HasAVX512, 1>;
|
|
|
|
defm VPOR : avx512_binop_rm_vl_dq<0xEB, 0xEB, "vpor", or,
|
|
|
|
SSE_INTALU_ITINS_P, HasAVX512, 1>;
|
|
|
|
defm VPXOR : avx512_binop_rm_vl_dq<0xEF, 0xEF, "vpxor", xor,
|
|
|
|
SSE_INTALU_ITINS_P, HasAVX512, 1>;
|
|
|
|
defm VPANDN : avx512_binop_rm_vl_dq<0xDF, 0xDF, "vpandn", X86andnp,
|
|
|
|
SSE_INTALU_ITINS_P, HasAVX512, 1>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 FP arithmetic
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
multiclass avx512_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
SizeItins itins> {
|
2013-12-11 22:31:04 +08:00
|
|
|
defm SSZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"), OpNode, FR32X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f32mem, itins.s, 0>, XS, EVEX_4V, VEX_LIG,
|
|
|
|
EVEX_CD8<32, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm SDZ : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "sd"), OpNode, FR64X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f64mem, itins.d, 0>, XD, VEX_W, EVEX_4V, VEX_LIG,
|
|
|
|
EVEX_CD8<64, CD8VT1>;
|
|
|
|
}
|
|
|
|
|
|
|
|
let isCommutable = 1 in {
|
|
|
|
defm VADD : avx512_binop_s<0x58, "add", fadd, SSE_ALU_ITINS_S>;
|
|
|
|
defm VMUL : avx512_binop_s<0x59, "mul", fmul, SSE_ALU_ITINS_S>;
|
|
|
|
defm VMIN : avx512_binop_s<0x5D, "min", X86fmin, SSE_ALU_ITINS_S>;
|
|
|
|
defm VMAX : avx512_binop_s<0x5F, "max", X86fmax, SSE_ALU_ITINS_S>;
|
|
|
|
}
|
|
|
|
let isCommutable = 0 in {
|
|
|
|
defm VSUB : avx512_binop_s<0x5C, "sub", fsub, SSE_ALU_ITINS_S>;
|
|
|
|
defm VDIV : avx512_binop_s<0x5E, "div", fdiv, SSE_ALU_ITINS_S>;
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
2014-10-29 23:43:02 +08:00
|
|
|
X86VectorVTInfo _, bit IsCommutable> {
|
|
|
|
defm rr: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, _.RC:$src2), OpcodeStr##_.Suffix,
|
|
|
|
"$src2, $src1", "$src1, $src2",
|
|
|
|
(_.VT (OpNode _.RC:$src1, _.RC:$src2))>, EVEX_4V;
|
2013-09-17 15:34:34 +08:00
|
|
|
let mayLoad = 1 in {
|
2014-10-29 23:43:02 +08:00
|
|
|
defm rm: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, _.MemOp:$src2), OpcodeStr##_.Suffix,
|
|
|
|
"$src2, $src1", "$src1, $src2",
|
|
|
|
(OpNode _.RC:$src1, (_.LdFrag addr:$src2))>, EVEX_4V;
|
|
|
|
defm rmb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, _.ScalarMemOp:$src2), OpcodeStr##_.Suffix,
|
|
|
|
"${src2}"##_.BroadcastStr##", $src1",
|
|
|
|
"$src1, ${src2}"##_.BroadcastStr,
|
|
|
|
(OpNode _.RC:$src1, (_.VT (X86VBroadcast
|
|
|
|
(_.ScalarLdFrag addr:$src2))))>,
|
|
|
|
EVEX_4V, EVEX_B;
|
|
|
|
}//let mayLoad = 1
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_fp_binop_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
bit IsCommutable = 0> {
|
|
|
|
defm PSZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v16f32_info,
|
|
|
|
IsCommutable>, EVEX_V512, PS,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PDZ : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f64_info,
|
|
|
|
IsCommutable>, EVEX_V512, PD, VEX_W,
|
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
|
|
|
|
// Define only if AVX512VL feature is present.
|
|
|
|
let Predicates = [HasVLX] in {
|
|
|
|
defm PSZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f32x_info,
|
|
|
|
IsCommutable>, EVEX_V128, PS,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PSZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v8f32x_info,
|
|
|
|
IsCommutable>, EVEX_V256, PS,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PDZ128 : avx512_fp_packed<opc, OpcodeStr, OpNode, v2f64x_info,
|
|
|
|
IsCommutable>, EVEX_V128, PD, VEX_W,
|
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
defm PDZ256 : avx512_fp_packed<opc, OpcodeStr, OpNode, v4f64x_info,
|
|
|
|
IsCommutable>, EVEX_V256, PD, VEX_W,
|
|
|
|
EVEX_CD8<64, CD8VF>;
|
2014-03-06 16:45:30 +08:00
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
2014-10-29 23:43:02 +08:00
|
|
|
defm VADD : avx512_fp_binop_p<0x58, "vadd", fadd, 1>;
|
|
|
|
defm VMUL : avx512_fp_binop_p<0x59, "vmul", fmul, 1>;
|
|
|
|
defm VMIN : avx512_fp_binop_p<0x5D, "vmin", X86fmin, 1>;
|
|
|
|
defm VMAX : avx512_fp_binop_p<0x5F, "vmax", X86fmax, 1>;
|
|
|
|
defm VSUB : avx512_fp_binop_p<0x5C, "vsub", fsub>;
|
|
|
|
defm VDIV : avx512_fp_binop_p<0x5E, "vdiv", fdiv>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-01-05 18:46:09 +08:00
|
|
|
def : Pat<(v16f32 (int_x86_avx512_mask_max_ps_512 (v16f32 VR512:$src1),
|
|
|
|
(v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
|
|
|
|
(i16 -1), FROUND_CURRENT)),
|
|
|
|
(VMAXPSZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
|
|
|
|
def : Pat<(v8f64 (int_x86_avx512_mask_max_pd_512 (v8f64 VR512:$src1),
|
|
|
|
(v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
|
|
|
|
(i8 -1), FROUND_CURRENT)),
|
|
|
|
(VMAXPDZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
|
|
|
|
def : Pat<(v16f32 (int_x86_avx512_mask_min_ps_512 (v16f32 VR512:$src1),
|
|
|
|
(v16f32 VR512:$src2), (bc_v16f32 (v16i32 immAllZerosV)),
|
|
|
|
(i16 -1), FROUND_CURRENT)),
|
|
|
|
(VMINPSZrr VR512:$src1, VR512:$src2)>;
|
|
|
|
|
|
|
|
def : Pat<(v8f64 (int_x86_avx512_mask_min_pd_512 (v8f64 VR512:$src1),
|
|
|
|
(v8f64 VR512:$src2), (bc_v8f64 (v16i32 immAllZerosV)),
|
|
|
|
(i8 -1), FROUND_CURRENT)),
|
|
|
|
(VMINPDZrr VR512:$src1, VR512:$src2)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 VPTESTM instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
multiclass avx512_vptest<bits<8> opc, string OpcodeStr, RegisterClass KRC,
|
|
|
|
RegisterClass RC, X86MemOperand x86memop, PatFrag memop_frag,
|
|
|
|
SDNode OpNode, ValueType vt> {
|
2014-02-05 15:05:03 +08:00
|
|
|
def rr : AVX512PI<opc, MRMSrcReg,
|
2013-09-17 15:34:34 +08:00
|
|
|
(outs KRC:$dst), (ins RC:$src1, RC:$src2),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2014-02-05 15:05:03 +08:00
|
|
|
[(set KRC:$dst, (OpNode (vt RC:$src1), (vt RC:$src2)))],
|
|
|
|
SSEPackedInt>, EVEX_4V;
|
|
|
|
def rm : AVX512PI<opc, MRMSrcMem,
|
2013-09-17 15:34:34 +08:00
|
|
|
(outs KRC:$dst), (ins RC:$src1, x86memop:$src2),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set KRC:$dst, (OpNode (vt RC:$src1),
|
2014-02-05 15:05:03 +08:00
|
|
|
(bitconvert (memop_frag addr:$src2))))], SSEPackedInt>, EVEX_4V;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
defm VPTESTMDZ : avx512_vptest<0x27, "vptestmd", VK16, VR512, f512mem,
|
2014-02-05 15:05:03 +08:00
|
|
|
memopv16i32, X86testm, v16i32>, T8PD, EVEX_V512,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPTESTMQZ : avx512_vptest<0x27, "vptestmq", VK8, VR512, f512mem,
|
2014-02-23 22:28:35 +08:00
|
|
|
memopv8i64, X86testm, v8i64>, T8PD, EVEX_V512, VEX_W,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
|
2014-02-05 15:05:03 +08:00
|
|
|
let Predicates = [HasCDI] in {
|
|
|
|
defm VPTESTNMDZ : avx512_vptest<0x27, "vptestnmd", VK16, VR512, f512mem,
|
|
|
|
memopv16i32, X86testnm, v16i32>, T8XS, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPTESTNMQZ : avx512_vptest<0x27, "vptestnmq", VK8, VR512, f512mem,
|
2014-02-23 22:28:35 +08:00
|
|
|
memopv8i64, X86testnm, v8i64>, T8XS, EVEX_V512, VEX_W,
|
2014-02-05 15:05:03 +08:00
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
}
|
|
|
|
|
2014-01-13 20:55:03 +08:00
|
|
|
def : Pat <(i16 (int_x86_avx512_mask_ptestm_d_512 (v16i32 VR512:$src1),
|
|
|
|
(v16i32 VR512:$src2), (i16 -1))),
|
|
|
|
(COPY_TO_REGCLASS (VPTESTMDZrr VR512:$src1, VR512:$src2), GR16)>;
|
|
|
|
|
|
|
|
def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
|
|
|
|
(v8i64 VR512:$src2), (i8 -1))),
|
2014-02-23 22:28:35 +08:00
|
|
|
(COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
|
2014-11-26 04:41:51 +08:00
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 Shift instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
multiclass avx512_shift_rmi<bits<8> opc, Format ImmFormR, Format ImmFormM,
|
2014-11-14 23:43:00 +08:00
|
|
|
string OpcodeStr, SDNode OpNode, X86VectorVTInfo _> {
|
|
|
|
defm ri : AVX512_maskable<opc, ImmFormR, _, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, i8imm:$src2), OpcodeStr,
|
|
|
|
"$src2, $src1", "$src1, $src2",
|
|
|
|
(_.VT (OpNode _.RC:$src1, (i8 imm:$src2))),
|
|
|
|
" ", SSE_INTSHIFT_ITINS_P.rr>, AVX512BIi8Base, EVEX_4V;
|
|
|
|
defm mi : AVX512_maskable<opc, ImmFormM, _, (outs _.RC:$dst),
|
|
|
|
(ins _.MemOp:$src1, i8imm:$src2), OpcodeStr,
|
|
|
|
"$src2, $src1", "$src1, $src2",
|
|
|
|
(_.VT (OpNode (_.MemOpFrag addr:$src1), (i8 imm:$src2))),
|
|
|
|
" ", SSE_INTSHIFT_ITINS_P.rm>, AVX512BIi8Base, EVEX_4V;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
2014-11-26 04:41:51 +08:00
|
|
|
ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
|
|
|
|
// src2 is always 128-bit
|
|
|
|
defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, VR128X:$src2), OpcodeStr,
|
|
|
|
"$src2, $src1", "$src1, $src2",
|
|
|
|
(_.VT (OpNode _.RC:$src1, (SrcVT VR128X:$src2))),
|
|
|
|
" ", SSE_INTSHIFT_ITINS_P.rr>, AVX512BIBase, EVEX_4V;
|
|
|
|
defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, i128mem:$src2), OpcodeStr,
|
|
|
|
"$src2, $src1", "$src1, $src2",
|
|
|
|
(_.VT (OpNode _.RC:$src1, (bc_frag (memopv2i64 addr:$src2)))),
|
|
|
|
" ", SSE_INTSHIFT_ITINS_P.rm>, AVX512BIBase, EVEX_4V;
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_varshift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
|
|
|
|
defm Z : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag, _>, EVEX_V512;
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_varshift_types<bits<8> opcd, bits<8> opcq, string OpcodeStr,
|
|
|
|
SDNode OpNode> {
|
|
|
|
defm D : avx512_varshift_sizes<opcd, OpcodeStr#"d", OpNode, v4i32, bc_v4i32,
|
|
|
|
v16i32_info>, EVEX_CD8<32, CD8VQ>;
|
|
|
|
defm Q : avx512_varshift_sizes<opcq, OpcodeStr#"q", OpNode, v2i64, bc_v2i64,
|
|
|
|
v8i64_info>, EVEX_CD8<64, CD8VQ>, VEX_W;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
defm VPSRLDZ : avx512_shift_rmi<0x72, MRM2r, MRM2m, "vpsrld", X86vsrli,
|
2014-11-14 23:43:00 +08:00
|
|
|
v16i32_info>,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPSRLQZ : avx512_shift_rmi<0x73, MRM2r, MRM2m, "vpsrlq", X86vsrli,
|
2014-11-14 23:43:00 +08:00
|
|
|
v8i64_info>, EVEX_V512,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<64, CD8VF>, VEX_W;
|
|
|
|
|
|
|
|
defm VPSLLDZ : avx512_shift_rmi<0x72, MRM6r, MRM6m, "vpslld", X86vshli,
|
2014-11-14 23:43:00 +08:00
|
|
|
v16i32_info>, EVEX_V512,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPSLLQZ : avx512_shift_rmi<0x73, MRM6r, MRM6m, "vpsllq", X86vshli,
|
2014-11-14 23:43:00 +08:00
|
|
|
v8i64_info>, EVEX_V512,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<64, CD8VF>, VEX_W;
|
|
|
|
|
|
|
|
defm VPSRADZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsrad", X86vsrai,
|
2014-11-14 23:43:00 +08:00
|
|
|
v16i32_info>,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPSRAQZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsraq", X86vsrai,
|
2014-11-14 23:43:00 +08:00
|
|
|
v8i64_info>, EVEX_V512,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<64, CD8VF>, VEX_W;
|
2014-11-26 04:41:51 +08:00
|
|
|
|
|
|
|
defm VPSRL : avx512_varshift_types<0xD2, 0xD3, "vpsrl", X86vsrl>;
|
|
|
|
defm VPSLL : avx512_varshift_types<0xF2, 0xF3, "vpsll", X86vshl>;
|
|
|
|
defm VPSRA : avx512_varshift_types<0xE2, 0xE2, "vpsra", X86vsra>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
//===-------------------------------------------------------------------===//
|
|
|
|
// Variable Bit Shifts
|
|
|
|
//===-------------------------------------------------------------------===//
|
|
|
|
multiclass avx512_var_shift<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
RegisterClass RC, ValueType vt,
|
|
|
|
X86MemOperand x86memop, PatFrag mem_frag> {
|
|
|
|
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
|
|
|
(vt (OpNode RC:$src1, (vt RC:$src2))))]>,
|
|
|
|
EVEX_4V;
|
|
|
|
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, x86memop:$src2),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
|
|
|
(vt (OpNode RC:$src1, (mem_frag addr:$src2))))]>,
|
|
|
|
EVEX_4V;
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VPSLLVDZ : avx512_var_shift<0x47, "vpsllvd", shl, VR512, v16i32,
|
|
|
|
i512mem, memopv16i32>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPSLLVQZ : avx512_var_shift<0x47, "vpsllvq", shl, VR512, v8i64,
|
|
|
|
i512mem, memopv8i64>, EVEX_V512, VEX_W,
|
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
defm VPSRLVDZ : avx512_var_shift<0x45, "vpsrlvd", srl, VR512, v16i32,
|
|
|
|
i512mem, memopv16i32>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPSRLVQZ : avx512_var_shift<0x45, "vpsrlvq", srl, VR512, v8i64,
|
|
|
|
i512mem, memopv8i64>, EVEX_V512, VEX_W,
|
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
defm VPSRAVDZ : avx512_var_shift<0x46, "vpsravd", sra, VR512, v16i32,
|
|
|
|
i512mem, memopv16i32>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VPSRAVQZ : avx512_var_shift<0x46, "vpsravq", sra, VR512, v8i64,
|
|
|
|
i512mem, memopv8i64>, EVEX_V512, VEX_W,
|
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 - MOVDDUP
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
multiclass avx512_movddup<string OpcodeStr, RegisterClass RC, ValueType VT,
|
|
|
|
X86MemOperand x86memop, PatFrag memop_frag> {
|
|
|
|
def rr : AVX512PDI<0x12, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst, (VT (X86Movddup RC:$src)))]>, EVEX;
|
|
|
|
def rm : AVX512PDI<0x12, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
|
|
|
(VT (X86Movddup (memop_frag addr:$src))))]>, EVEX;
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VMOVDDUPZ : avx512_movddup<"vmovddup", VR512, v8f64, f512mem, memopv8f64>,
|
|
|
|
VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
|
|
|
|
def : Pat<(X86Movddup (v8f64 (scalar_to_vector (loadf64 addr:$src)))),
|
|
|
|
(VMOVDDUPZrm addr:$src)>;
|
|
|
|
|
2013-11-14 19:29:27 +08:00
|
|
|
//===---------------------------------------------------------------------===//
|
|
|
|
// Replicate Single FP - MOVSHDUP and MOVSLDUP
|
|
|
|
//===---------------------------------------------------------------------===//
|
|
|
|
multiclass avx512_replicate_sfp<bits<8> op, SDNode OpNode, string OpcodeStr,
|
|
|
|
ValueType vt, RegisterClass RC, PatFrag mem_frag,
|
|
|
|
X86MemOperand x86memop> {
|
|
|
|
def rr : AVX512XSI<op, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2013-11-14 19:29:27 +08:00
|
|
|
[(set RC:$dst, (vt (OpNode RC:$src)))]>, EVEX;
|
|
|
|
let mayLoad = 1 in
|
|
|
|
def rm : AVX512XSI<op, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2013-11-14 19:29:27 +08:00
|
|
|
[(set RC:$dst, (OpNode (mem_frag addr:$src)))]>, EVEX;
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VMOVSHDUPZ : avx512_replicate_sfp<0x16, X86Movshdup, "vmovshdup",
|
|
|
|
v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VMOVSLDUPZ : avx512_replicate_sfp<0x12, X86Movsldup, "vmovsldup",
|
|
|
|
v16f32, VR512, memopv16f32, f512mem>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
|
|
|
|
def : Pat<(v16i32 (X86Movshdup VR512:$src)), (VMOVSHDUPZrr VR512:$src)>;
|
|
|
|
def : Pat<(v16i32 (X86Movshdup (memopv16i32 addr:$src))),
|
|
|
|
(VMOVSHDUPZrm addr:$src)>;
|
|
|
|
def : Pat<(v16i32 (X86Movsldup VR512:$src)), (VMOVSLDUPZrr VR512:$src)>;
|
|
|
|
def : Pat<(v16i32 (X86Movsldup (memopv16i32 addr:$src))),
|
|
|
|
(VMOVSLDUPZrm addr:$src)>;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Move Low to High and High to Low packed FP Instructions
|
|
|
|
//===----------------------------------------------------------------------===//
|
2013-09-17 15:34:34 +08:00
|
|
|
def VMOVLHPSZrr : AVX512PSI<0x16, MRMSrcReg, (outs VR128X:$dst),
|
|
|
|
(ins VR128X:$src1, VR128X:$src2),
|
2013-12-11 22:31:04 +08:00
|
|
|
"vmovlhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set VR128X:$dst, (v4f32 (X86Movlhps VR128X:$src1, VR128X:$src2)))],
|
|
|
|
IIC_SSE_MOV_LH>, EVEX_4V;
|
|
|
|
def VMOVHLPSZrr : AVX512PSI<0x12, MRMSrcReg, (outs VR128X:$dst),
|
|
|
|
(ins VR128X:$src1, VR128X:$src2),
|
2013-12-11 22:31:04 +08:00
|
|
|
"vmovhlps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set VR128X:$dst, (v4f32 (X86Movhlps VR128X:$src1, VR128X:$src2)))],
|
|
|
|
IIC_SSE_MOV_LH>, EVEX_4V;
|
|
|
|
|
2013-09-27 15:20:47 +08:00
|
|
|
let Predicates = [HasAVX512] in {
|
|
|
|
// MOVLHPS patterns
|
|
|
|
def : Pat<(v4i32 (X86Movlhps VR128X:$src1, VR128X:$src2)),
|
|
|
|
(VMOVLHPSZrr VR128X:$src1, VR128X:$src2)>;
|
|
|
|
def : Pat<(v2i64 (X86Movlhps VR128X:$src1, VR128X:$src2)),
|
|
|
|
(VMOVLHPSZrr (v2i64 VR128X:$src1), VR128X:$src2)>;
|
|
|
|
|
|
|
|
// MOVHLPS patterns
|
|
|
|
def : Pat<(v4i32 (X86Movhlps VR128X:$src1, VR128X:$src2)),
|
|
|
|
(VMOVHLPSZrr VR128X:$src1, VR128X:$src2)>;
|
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// FMA - Fused Multiply Operations
|
|
|
|
//
|
2014-10-24 08:02:55 +08:00
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
let Constraints = "$src1 = $dst" in {
|
2014-10-24 08:02:55 +08:00
|
|
|
// Omitting the parameter OpNode (= null_frag) disables ISel pattern matching.
|
|
|
|
multiclass avx512_fma3p_rm<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
|
|
|
|
SDPatternOperator OpNode = null_frag> {
|
2014-10-09 07:25:39 +08:00
|
|
|
defm r: AVX512_maskable_3src<opc, MRMSrcReg, _, (outs _.RC:$dst),
|
2014-09-30 06:54:41 +08:00
|
|
|
(ins _.RC:$src2, _.RC:$src3),
|
2014-08-15 01:13:19 +08:00
|
|
|
OpcodeStr, "$src3, $src2", "$src2, $src3",
|
2014-09-30 06:54:41 +08:00
|
|
|
(_.VT (OpNode _.RC:$src1, _.RC:$src2, _.RC:$src3))>,
|
2014-08-15 01:13:19 +08:00
|
|
|
AVX512FMA3Base;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
let mayLoad = 1 in
|
2014-09-30 06:54:41 +08:00
|
|
|
def m: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, _.RC:$src2, _.MemOp:$src3),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2014-09-30 06:54:41 +08:00
|
|
|
[(set _.RC:$dst, (_.VT (OpNode _.RC:$src1, _.RC:$src2,
|
|
|
|
(_.MemOpFrag addr:$src3))))]>;
|
|
|
|
def mb: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, _.RC:$src2, _.ScalarMemOp:$src3),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{${src3}", _.BroadcastStr,
|
2014-09-30 06:54:41 +08:00
|
|
|
", $src2, $dst|$dst, $src2, ${src3}", _.BroadcastStr, "}"),
|
|
|
|
[(set _.RC:$dst, (OpNode _.RC:$src1, _.RC:$src2,
|
|
|
|
(_.VT (X86VBroadcast (_.ScalarLdFrag addr:$src3)))))]>, EVEX_B;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
} // Constraints = "$src1 = $dst"
|
|
|
|
|
2014-10-24 08:03:00 +08:00
|
|
|
multiclass avx512_fma3p_forms<bits<8> opc213, bits<8> opc231,
|
2014-10-24 08:02:55 +08:00
|
|
|
string OpcodeStr, X86VectorVTInfo VTI,
|
|
|
|
SDPatternOperator OpNode> {
|
|
|
|
defm v213 : avx512_fma3p_rm<opc213, !strconcat(OpcodeStr, "213", VTI.Suffix),
|
|
|
|
VTI, OpNode>,
|
|
|
|
EVEX_V512, EVEX_CD8<VTI.EltSize, CD8VF>;
|
2014-10-24 08:03:00 +08:00
|
|
|
|
|
|
|
defm v231 : avx512_fma3p_rm<opc231, !strconcat(OpcodeStr, "231", VTI.Suffix),
|
|
|
|
VTI>,
|
|
|
|
EVEX_V512, EVEX_CD8<VTI.EltSize, CD8VF>;
|
2014-10-24 08:02:55 +08:00
|
|
|
}
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
let ExeDomain = SSEPackedSingle in {
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFMADDPSZ : avx512_fma3p_forms<0xA8, 0xB8, "vfmadd",
|
2014-10-24 08:02:55 +08:00
|
|
|
v16f32_info, X86Fmadd>;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFMSUBPSZ : avx512_fma3p_forms<0xAA, 0xBA, "vfmsub",
|
2014-10-24 08:02:55 +08:00
|
|
|
v16f32_info, X86Fmsub>;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFMADDSUBPSZ : avx512_fma3p_forms<0xA6, 0xB6, "vfmaddsub",
|
2014-10-24 08:02:55 +08:00
|
|
|
v16f32_info, X86Fmaddsub>;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFMSUBADDPSZ : avx512_fma3p_forms<0xA7, 0xB7, "vfmsubadd",
|
2014-10-24 08:02:55 +08:00
|
|
|
v16f32_info, X86Fmsubadd>;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFNMADDPSZ : avx512_fma3p_forms<0xAC, 0xBC, "vfnmadd",
|
2014-10-24 08:02:55 +08:00
|
|
|
v16f32_info, X86Fnmadd>;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFNMSUBPSZ : avx512_fma3p_forms<0xAE, 0xBE, "vfnmsub",
|
2014-10-24 08:02:55 +08:00
|
|
|
v16f32_info, X86Fnmsub>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
let ExeDomain = SSEPackedDouble in {
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFMADDPDZ : avx512_fma3p_forms<0xA8, 0xB8, "vfmadd",
|
2014-10-24 08:02:55 +08:00
|
|
|
v8f64_info, X86Fmadd>, VEX_W;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFMSUBPDZ : avx512_fma3p_forms<0xAA, 0xBA, "vfmsub",
|
2014-10-24 08:02:55 +08:00
|
|
|
v8f64_info, X86Fmsub>, VEX_W;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFMADDSUBPDZ : avx512_fma3p_forms<0xA6, 0xB6, "vfmaddsub",
|
2014-10-24 08:02:55 +08:00
|
|
|
v8f64_info, X86Fmaddsub>, VEX_W;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFMSUBADDPDZ : avx512_fma3p_forms<0xA7, 0xB7, "vfmsubadd",
|
2014-10-24 08:02:55 +08:00
|
|
|
v8f64_info, X86Fmsubadd>, VEX_W;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFNMADDPDZ : avx512_fma3p_forms<0xAC, 0xBC, "vfnmadd",
|
2014-10-24 08:02:55 +08:00
|
|
|
v8f64_info, X86Fnmadd>, VEX_W;
|
2014-10-24 08:03:00 +08:00
|
|
|
defm VFNMSUBPDZ : avx512_fma3p_forms<0xAE, 0xBE, "vfnmsub",
|
2014-10-24 08:02:55 +08:00
|
|
|
v8f64_info, X86Fnmsub>, VEX_W;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
let Constraints = "$src1 = $dst" in {
|
2014-09-30 06:54:41 +08:00
|
|
|
multiclass avx512_fma3p_m132<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
X86VectorVTInfo _> {
|
2013-09-17 15:34:34 +08:00
|
|
|
let mayLoad = 1 in
|
2014-09-30 06:54:41 +08:00
|
|
|
def m: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, _.RC:$src3, _.MemOp:$src2),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src2, $src3, $dst|$dst, $src3, $src2}"),
|
2014-09-30 06:54:41 +08:00
|
|
|
[(set _.RC:$dst, (_.VT (OpNode _.RC:$src1, (_.MemOpFrag addr:$src2),
|
|
|
|
_.RC:$src3)))]>;
|
|
|
|
def mb: AVX512FMA3<opc, MRMSrcMem, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src1, _.RC:$src3, _.ScalarMemOp:$src2),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{${src2}", _.BroadcastStr,
|
2014-09-30 06:54:41 +08:00
|
|
|
", $src3, $dst|$dst, $src3, ${src2}", _.BroadcastStr, "}"),
|
|
|
|
[(set _.RC:$dst,
|
|
|
|
(OpNode _.RC:$src1, (_.VT (X86VBroadcast
|
|
|
|
(_.ScalarLdFrag addr:$src2))),
|
|
|
|
_.RC:$src3))]>, EVEX_B;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
} // Constraints = "$src1 = $dst"
|
|
|
|
|
|
|
|
|
|
|
|
let ExeDomain = SSEPackedSingle in {
|
2014-09-30 06:54:41 +08:00
|
|
|
defm VFMADD132PSZ : avx512_fma3p_m132<0x98, "vfmadd132ps", X86Fmadd,
|
|
|
|
v16f32_info>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VFMSUB132PSZ : avx512_fma3p_m132<0x9A, "vfmsub132ps", X86Fmsub,
|
|
|
|
v16f32_info>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VFMADDSUB132PSZ : avx512_fma3p_m132<0x96, "vfmaddsub132ps", X86Fmaddsub,
|
|
|
|
v16f32_info>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VFMSUBADD132PSZ : avx512_fma3p_m132<0x97, "vfmsubadd132ps", X86Fmsubadd,
|
|
|
|
v16f32_info>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VFNMADD132PSZ : avx512_fma3p_m132<0x9C, "vfnmadd132ps", X86Fnmadd,
|
|
|
|
v16f32_info>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VFNMSUB132PSZ : avx512_fma3p_m132<0x9E, "vfnmsub132ps", X86Fnmsub,
|
|
|
|
v16f32_info>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
let ExeDomain = SSEPackedDouble in {
|
2014-09-30 06:54:41 +08:00
|
|
|
defm VFMADD132PDZ : avx512_fma3p_m132<0x98, "vfmadd132pd", X86Fmadd,
|
|
|
|
v8f64_info>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
defm VFMSUB132PDZ : avx512_fma3p_m132<0x9A, "vfmsub132pd", X86Fmsub,
|
|
|
|
v8f64_info>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
defm VFMADDSUB132PDZ : avx512_fma3p_m132<0x96, "vfmaddsub132pd", X86Fmaddsub,
|
|
|
|
v8f64_info>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
defm VFMSUBADD132PDZ : avx512_fma3p_m132<0x97, "vfmsubadd132pd", X86Fmsubadd,
|
|
|
|
v8f64_info>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
defm VFNMADD132PDZ : avx512_fma3p_m132<0x9C, "vfnmadd132pd", X86Fnmadd,
|
|
|
|
v8f64_info>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
defm VFNMSUB132PDZ : avx512_fma3p_m132<0x9E, "vfnmsub132pd", X86Fnmsub,
|
|
|
|
v8f64_info>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Scalar FMA
|
|
|
|
let Constraints = "$src1 = $dst" in {
|
|
|
|
multiclass avx512_fma3s_rm<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
|
|
|
RegisterClass RC, ValueType OpVT,
|
|
|
|
X86MemOperand x86memop, Operand memop,
|
|
|
|
PatFrag mem_frag> {
|
|
|
|
let isCommutable = 1 in
|
|
|
|
def r : AVX512FMA3<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2, RC:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
|
|
|
(OpVT (OpNode RC:$src2, RC:$src1, RC:$src3)))]>;
|
|
|
|
let mayLoad = 1 in
|
|
|
|
def m : AVX512FMA3<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2, f128mem:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set RC:$dst,
|
|
|
|
(OpVT (OpNode RC:$src2, RC:$src1,
|
|
|
|
(mem_frag addr:$src3))))]>;
|
|
|
|
}
|
|
|
|
|
|
|
|
} // Constraints = "$src1 = $dst"
|
|
|
|
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VFMADDSSZ : avx512_fma3s_rm<0xA9, "vfmadd213ss", X86Fmadd, FR32X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VFMADDSDZ : avx512_fma3s_rm<0xA9, "vfmadd213sd", X86Fmadd, FR64X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VFMSUBSSZ : avx512_fma3s_rm<0xAB, "vfmsub213ss", X86Fmsub, FR32X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VFMSUBSDZ : avx512_fma3s_rm<0xAB, "vfmsub213sd", X86Fmsub, FR64X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VFNMADDSSZ : avx512_fma3s_rm<0xAD, "vfnmadd213ss", X86Fnmadd, FR32X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VFNMADDSDZ : avx512_fma3s_rm<0xAD, "vfnmadd213sd", X86Fnmadd, FR64X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VFNMSUBSSZ : avx512_fma3s_rm<0xAF, "vfnmsub213ss", X86Fnmsub, FR32X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f32, f32mem, ssmem, loadf32>, EVEX_CD8<32, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VFNMSUBSDZ : avx512_fma3s_rm<0xAF, "vfnmsub213sd", X86Fnmsub, FR64X,
|
2013-09-17 15:34:34 +08:00
|
|
|
f64, f64mem, sdmem, loadf64>, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 Scalar convert from sign integer to float/double
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
multiclass avx512_vcvtsi<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
|
|
|
|
X86MemOperand x86memop, string asm> {
|
2014-01-05 22:21:07 +08:00
|
|
|
let hasSideEffects = 0 in {
|
2013-09-17 15:34:34 +08:00
|
|
|
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_4V;
|
2013-09-17 15:34:34 +08:00
|
|
|
let mayLoad = 1 in
|
|
|
|
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst),
|
|
|
|
(ins DstRC:$src1, x86memop:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_4V;
|
2014-01-05 22:21:07 +08:00
|
|
|
} // hasSideEffects = 0
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
2013-10-09 13:11:10 +08:00
|
|
|
let Predicates = [HasAVX512] in {
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VCVTSI2SSZ : avx512_vcvtsi<0x2A, GR32, FR32X, i32mem, "cvtsi2ss{l}">,
|
2013-09-17 15:34:34 +08:00
|
|
|
XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VCVTSI642SSZ : avx512_vcvtsi<0x2A, GR64, FR32X, i64mem, "cvtsi2ss{q}">,
|
2013-09-17 15:34:34 +08:00
|
|
|
XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VCVTSI2SDZ : avx512_vcvtsi<0x2A, GR32, FR64X, i32mem, "cvtsi2sd{l}">,
|
2013-09-17 15:34:34 +08:00
|
|
|
XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VCVTSI642SDZ : avx512_vcvtsi<0x2A, GR64, FR64X, i64mem, "cvtsi2sd{q}">,
|
2013-09-17 15:34:34 +08:00
|
|
|
XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
|
|
|
|
|
|
|
|
def : Pat<(f32 (sint_to_fp (loadi32 addr:$src))),
|
|
|
|
(VCVTSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
|
|
|
|
def : Pat<(f32 (sint_to_fp (loadi64 addr:$src))),
|
2013-10-06 21:11:09 +08:00
|
|
|
(VCVTSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(f64 (sint_to_fp (loadi32 addr:$src))),
|
|
|
|
(VCVTSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
|
|
|
|
def : Pat<(f64 (sint_to_fp (loadi64 addr:$src))),
|
2013-10-06 21:11:09 +08:00
|
|
|
(VCVTSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
def : Pat<(f32 (sint_to_fp GR32:$src)),
|
|
|
|
(VCVTSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
|
|
|
|
def : Pat<(f32 (sint_to_fp GR64:$src)),
|
2013-10-06 21:11:09 +08:00
|
|
|
(VCVTSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(f64 (sint_to_fp GR32:$src)),
|
|
|
|
(VCVTSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
|
|
|
|
def : Pat<(f64 (sint_to_fp GR64:$src)),
|
2013-10-06 21:11:09 +08:00
|
|
|
(VCVTSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VCVTUSI2SSZ : avx512_vcvtsi<0x7B, GR32, FR32X, i32mem, "cvtusi2ss{l}">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XS, VEX_LIG, EVEX_CD8<32, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VCVTUSI642SSZ : avx512_vcvtsi<0x7B, GR64, FR32X, i64mem, "cvtusi2ss{q}">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XS, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VCVTUSI2SDZ : avx512_vcvtsi<0x7B, GR32, FR64X, i32mem, "cvtusi2sd{l}">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XD, VEX_LIG, EVEX_CD8<32, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
defm VCVTUSI642SDZ : avx512_vcvtsi<0x7B, GR64, FR64X, i64mem, "cvtusi2sd{q}">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XD, VEX_W, VEX_LIG, EVEX_CD8<64, CD8VT1>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2013-10-06 21:11:09 +08:00
|
|
|
def : Pat<(f32 (uint_to_fp (loadi32 addr:$src))),
|
|
|
|
(VCVTUSI2SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
|
|
|
|
def : Pat<(f32 (uint_to_fp (loadi64 addr:$src))),
|
|
|
|
(VCVTUSI642SSZrm (f32 (IMPLICIT_DEF)), addr:$src)>;
|
|
|
|
def : Pat<(f64 (uint_to_fp (loadi32 addr:$src))),
|
|
|
|
(VCVTUSI2SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
|
|
|
|
def : Pat<(f64 (uint_to_fp (loadi64 addr:$src))),
|
|
|
|
(VCVTUSI642SDZrm (f64 (IMPLICIT_DEF)), addr:$src)>;
|
|
|
|
|
|
|
|
def : Pat<(f32 (uint_to_fp GR32:$src)),
|
|
|
|
(VCVTUSI2SSZrr (f32 (IMPLICIT_DEF)), GR32:$src)>;
|
|
|
|
def : Pat<(f32 (uint_to_fp GR64:$src)),
|
|
|
|
(VCVTUSI642SSZrr (f32 (IMPLICIT_DEF)), GR64:$src)>;
|
|
|
|
def : Pat<(f64 (uint_to_fp GR32:$src)),
|
|
|
|
(VCVTUSI2SDZrr (f64 (IMPLICIT_DEF)), GR32:$src)>;
|
|
|
|
def : Pat<(f64 (uint_to_fp GR64:$src)),
|
|
|
|
(VCVTUSI642SDZrr (f64 (IMPLICIT_DEF)), GR64:$src)>;
|
2013-10-09 13:11:10 +08:00
|
|
|
}
|
2013-10-06 21:11:09 +08:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 Scalar convert from float/double to integer
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
multiclass avx512_cvt_s_int<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
|
|
|
|
Intrinsic Int, Operand memop, ComplexPattern mem_cpat,
|
|
|
|
string asm> {
|
2014-01-05 22:21:07 +08:00
|
|
|
let hasSideEffects = 0 in {
|
2013-10-06 21:11:09 +08:00
|
|
|
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
|
2013-12-11 22:31:04 +08:00
|
|
|
[(set DstRC:$dst, (Int SrcRC:$src))]>, EVEX, VEX_LIG,
|
|
|
|
Requires<[HasAVX512]>;
|
2013-10-06 21:11:09 +08:00
|
|
|
let mayLoad = 1 in
|
|
|
|
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins memop:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(asm,"\t{$src, $dst|$dst, $src}"), []>, EVEX, VEX_LIG,
|
2013-12-11 22:31:04 +08:00
|
|
|
Requires<[HasAVX512]>;
|
2014-01-05 22:21:07 +08:00
|
|
|
} // hasSideEffects = 0
|
2013-10-06 21:11:09 +08:00
|
|
|
}
|
|
|
|
let Predicates = [HasAVX512] in {
|
|
|
|
// Convert float/double to signed/unsigned int 32/64
|
|
|
|
defm VCVTSS2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse_cvtss2si,
|
2013-12-11 22:31:04 +08:00
|
|
|
ssmem, sse_load_f32, "cvtss2si">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XS, EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VCVTSS2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse_cvtss2si64,
|
2013-12-11 22:31:04 +08:00
|
|
|
ssmem, sse_load_f32, "cvtss2si">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XS, VEX_W, EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VCVTSS2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtss2usi,
|
2013-12-11 22:31:04 +08:00
|
|
|
ssmem, sse_load_f32, "cvtss2usi">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XS, EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VCVTSS2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
|
|
|
|
int_x86_avx512_cvtss2usi64, ssmem,
|
2013-12-11 22:31:04 +08:00
|
|
|
sse_load_f32, "cvtss2usi">, XS, VEX_W,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VCVTSD2SIZ: avx512_cvt_s_int<0x2D, VR128X, GR32, int_x86_sse2_cvtsd2si,
|
2013-12-11 22:31:04 +08:00
|
|
|
sdmem, sse_load_f64, "cvtsd2si">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XD, EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VCVTSD2SI64Z: avx512_cvt_s_int<0x2D, VR128X, GR64, int_x86_sse2_cvtsd2si64,
|
2013-12-11 22:31:04 +08:00
|
|
|
sdmem, sse_load_f64, "cvtsd2si">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XD, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VCVTSD2USIZ: avx512_cvt_s_int<0x79, VR128X, GR32, int_x86_avx512_cvtsd2usi,
|
2013-12-11 22:31:04 +08:00
|
|
|
sdmem, sse_load_f64, "cvtsd2usi">,
|
2013-10-06 21:11:09 +08:00
|
|
|
XD, EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
|
|
|
|
int_x86_avx512_cvtsd2usi64, sdmem,
|
2013-12-11 22:31:04 +08:00
|
|
|
sse_load_f64, "cvtsd2usi">, XD, VEX_W,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<64, CD8VT1>;
|
|
|
|
|
2014-01-03 01:28:14 +08:00
|
|
|
let isCodeGenOnly = 1 in {
|
|
|
|
defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
|
|
|
|
int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
|
|
|
|
SSE_CVT_Scalar, 0>, XS, EVEX_4V;
|
|
|
|
defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
|
|
|
|
int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
|
|
|
|
SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
|
|
|
|
defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
|
|
|
|
int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
|
|
|
|
SSE_CVT_Scalar, 0>, XD, EVEX_4V;
|
|
|
|
defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
|
|
|
|
int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
|
|
|
|
SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
|
|
|
|
|
|
|
|
defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
|
|
|
|
int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
|
|
|
|
SSE_CVT_Scalar, 0>, XS, EVEX_4V;
|
|
|
|
defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
|
|
|
|
int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
|
|
|
|
SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
|
|
|
|
defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
|
|
|
|
int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
|
|
|
|
SSE_CVT_Scalar, 0>, XD, EVEX_4V;
|
|
|
|
defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
|
|
|
|
int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
|
|
|
|
SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
|
|
|
|
} // isCodeGenOnly = 1
|
2013-10-06 21:11:09 +08:00
|
|
|
|
|
|
|
// Convert float/double to signed/unsigned int 32/64 with truncation
|
2014-01-03 01:28:14 +08:00
|
|
|
let isCodeGenOnly = 1 in {
|
|
|
|
defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
|
|
|
|
ssmem, sse_load_f32, "cvttss2si">,
|
|
|
|
XS, EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
|
|
|
|
int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
|
|
|
|
"cvttss2si">, XS, VEX_W,
|
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
|
|
|
|
sdmem, sse_load_f64, "cvttsd2si">, XD,
|
|
|
|
EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
|
|
|
|
int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
|
|
|
|
"cvttsd2si">, XD, VEX_W,
|
|
|
|
EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
|
|
|
|
int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
|
|
|
|
"cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
|
|
|
|
int_x86_avx512_cvttss2usi64, ssmem,
|
|
|
|
sse_load_f32, "cvttss2usi">, XS, VEX_W,
|
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
|
|
|
|
int_x86_avx512_cvttsd2usi,
|
|
|
|
sdmem, sse_load_f64, "cvttsd2usi">, XD,
|
|
|
|
EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
|
|
|
|
int_x86_avx512_cvttsd2usi64, sdmem,
|
|
|
|
sse_load_f64, "cvttsd2usi">, XD, VEX_W,
|
|
|
|
EVEX_CD8<64, CD8VT1>;
|
|
|
|
} // isCodeGenOnly = 1
|
2013-10-06 21:11:09 +08:00
|
|
|
|
|
|
|
multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
|
|
|
|
SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
|
|
|
|
string asm> {
|
|
|
|
def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
|
2013-10-06 21:11:09 +08:00
|
|
|
[(set DstRC:$dst, (OpNode SrcRC:$src))]>, EVEX;
|
|
|
|
def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(asm,"\t{$src, $dst|$dst, $src}"),
|
2013-10-06 21:11:09 +08:00
|
|
|
[(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>, EVEX;
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VCVTTSS2SIZ : avx512_cvt_s<0x2C, FR32X, GR32, fp_to_sint, f32mem,
|
2013-12-11 22:31:04 +08:00
|
|
|
loadf32, "cvttss2si">, XS,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VCVTTSS2USIZ : avx512_cvt_s<0x78, FR32X, GR32, fp_to_uint, f32mem,
|
2013-12-11 22:31:04 +08:00
|
|
|
loadf32, "cvttss2usi">, XS,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VCVTTSS2SI64Z : avx512_cvt_s<0x2C, FR32X, GR64, fp_to_sint, f32mem,
|
2013-12-11 22:31:04 +08:00
|
|
|
loadf32, "cvttss2si">, XS, VEX_W,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VCVTTSS2USI64Z : avx512_cvt_s<0x78, FR32X, GR64, fp_to_uint, f32mem,
|
2013-12-11 22:31:04 +08:00
|
|
|
loadf32, "cvttss2usi">, XS, VEX_W,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VCVTTSD2SIZ : avx512_cvt_s<0x2C, FR64X, GR32, fp_to_sint, f64mem,
|
2013-12-11 22:31:04 +08:00
|
|
|
loadf64, "cvttsd2si">, XD,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VCVTTSD2USIZ : avx512_cvt_s<0x78, FR64X, GR32, fp_to_uint, f64mem,
|
2013-12-11 22:31:04 +08:00
|
|
|
loadf64, "cvttsd2usi">, XD,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VCVTTSD2SI64Z : avx512_cvt_s<0x2C, FR64X, GR64, fp_to_sint, f64mem,
|
2013-12-11 22:31:04 +08:00
|
|
|
loadf64, "cvttsd2si">, XD, VEX_W,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VCVTTSD2USI64Z : avx512_cvt_s<0x78, FR64X, GR64, fp_to_uint, f64mem,
|
2013-12-11 22:31:04 +08:00
|
|
|
loadf64, "cvttsd2usi">, XD, VEX_W,
|
2013-10-06 21:11:09 +08:00
|
|
|
EVEX_CD8<64, CD8VT1>;
|
2013-12-11 22:31:04 +08:00
|
|
|
} // HasAVX512
|
2013-09-17 15:34:34 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// AVX-512 Convert form float to double and back
|
|
|
|
//===----------------------------------------------------------------------===//
|
2014-01-05 22:21:07 +08:00
|
|
|
let hasSideEffects = 0 in {
|
2013-09-17 15:34:34 +08:00
|
|
|
def VCVTSS2SDZrr : AVX512XSI<0x5A, MRMSrcReg, (outs FR64X:$dst),
|
|
|
|
(ins FR32X:$src1, FR32X:$src2),
|
2013-12-11 22:31:04 +08:00
|
|
|
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2013-09-17 15:34:34 +08:00
|
|
|
[]>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2F]>;
|
|
|
|
let mayLoad = 1 in
|
|
|
|
def VCVTSS2SDZrm : AVX512XSI<0x5A, MRMSrcMem, (outs FR64X:$dst),
|
|
|
|
(ins FR32X:$src1, f32mem:$src2),
|
2013-12-11 22:31:04 +08:00
|
|
|
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2013-09-17 15:34:34 +08:00
|
|
|
[]>, EVEX_4V, VEX_LIG, Sched<[WriteCvtF2FLd, ReadAfterLd]>,
|
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
|
|
|
|
// Convert scalar double to scalar single
|
|
|
|
def VCVTSD2SSZrr : AVX512XDI<0x5A, MRMSrcReg, (outs FR32X:$dst),
|
|
|
|
(ins FR64X:$src1, FR64X:$src2),
|
2013-12-11 22:31:04 +08:00
|
|
|
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2013-09-17 15:34:34 +08:00
|
|
|
[]>, EVEX_4V, VEX_LIG, VEX_W, Sched<[WriteCvtF2F]>;
|
|
|
|
let mayLoad = 1 in
|
|
|
|
def VCVTSD2SSZrm : AVX512XDI<0x5A, MRMSrcMem, (outs FR32X:$dst),
|
|
|
|
(ins FR64X:$src1, f64mem:$src2),
|
2013-12-11 22:31:04 +08:00
|
|
|
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
|
2013-09-17 15:34:34 +08:00
|
|
|
[]>, EVEX_4V, VEX_LIG, VEX_W,
|
|
|
|
Sched<[WriteCvtF2FLd, ReadAfterLd]>, EVEX_CD8<64, CD8VT1>;
|
|
|
|
}
|
|
|
|
|
|
|
|
def : Pat<(f64 (fextend FR32X:$src)), (VCVTSS2SDZrr FR32X:$src, FR32X:$src)>,
|
|
|
|
Requires<[HasAVX512]>;
|
|
|
|
def : Pat<(fextend (loadf32 addr:$src)),
|
|
|
|
(VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>, Requires<[HasAVX512]>;
|
|
|
|
|
|
|
|
def : Pat<(extloadf32 addr:$src),
|
|
|
|
(VCVTSS2SDZrm (f32 (IMPLICIT_DEF)), addr:$src)>,
|
|
|
|
Requires<[HasAVX512, OptForSize]>;
|
|
|
|
|
|
|
|
def : Pat<(extloadf32 addr:$src),
|
|
|
|
(VCVTSS2SDZrr (f32 (IMPLICIT_DEF)), (VMOVSSZrm addr:$src))>,
|
|
|
|
Requires<[HasAVX512, OptForSpeed]>;
|
|
|
|
|
|
|
|
def : Pat<(f32 (fround FR64X:$src)), (VCVTSD2SSZrr FR64X:$src, FR64X:$src)>,
|
|
|
|
Requires<[HasAVX512]>;
|
|
|
|
|
multiclass avx512_vcvt_fp_with_rc<bits<8> opc, string asm, RegisterClass SrcRC,
                          RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
                          X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
                          Domain d> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
  def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
              !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
              [], d>, EVEX, EVEX_B, EVEX_RC;
  let mayLoad = 1 in
  def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
} // hasSideEffects = 0
}
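
// Same as avx512_vcvt_fp_with_rc, minus the rounding-control (rrb) variant.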
multiclass avx512_vcvt_fp<bits<8> opc, string asm, RegisterClass SrcRC,
                          RegisterClass DstRC, SDNode OpNode, PatFrag mem_frag,
                          X86MemOperand x86memop, ValueType OpVT, ValueType InVT,
                          Domain d> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT SrcRC:$src))))], d>, EVEX;
  let mayLoad = 1 in
  def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [(set DstRC:$dst,
                (OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))], d>, EVEX;
} // hasSideEffects = 0
}

defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, fround,
                                memopv8f64, f512mem, v8f32, v8f64,
                                SSEPackedSingle>, EVEX_V512, VEX_W, PD,
                                EVEX_CD8<64, CD8VF>;

defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
                                memopv4f64, f256mem, v8f64, v8f32,
                                SSEPackedDouble>, EVEX_V512, PS,
                                EVEX_CD8<32, CD8VH>;
def : Pat<(v8f64 (extloadv8f32 addr:$src)),
          (VCVTPS2PDZrm addr:$src)>;

def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
                  (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), (i32 FROUND_CURRENT))),
          (VCVTPD2PSZrr VR512:$src)>;

def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),
                  (bc_v8f32(v8i32 immAllZerosV)), (i8 -1), imm:$rc)),
          (VCVTPD2PSZrrb VR512:$src, imm:$rc)>;

//===----------------------------------------------------------------------===//
// AVX-512 Vector convert from signed integer to float/double
//===----------------------------------------------------------------------===//

defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv8i64, i512mem, v16f32, v16i32,
|
2014-02-18 08:21:49 +08:00
|
|
|
SSEPackedSingle>, EVEX_V512, PS,
|
2014-02-01 16:17:56 +08:00
|
|
|
EVEX_CD8<32, CD8VF>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
|
|
|
|
memopv4i64, i256mem, v8f64, v8i32,
|
|
|
|
SSEPackedDouble>, EVEX_V512, XS,
|
|
|
|
EVEX_CD8<32, CD8VH>;
|
|
|
|
|
2014-01-05 18:46:09 +08:00
|
|
|
defm VCVTTPS2DQZ : avx512_vcvt_fp<0x5B, "vcvttps2dq", VR512, VR512, fp_to_sint,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv16f32, f512mem, v16i32, v16f32,
|
|
|
|
SSEPackedSingle>, EVEX_V512, XS,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
|
2014-01-05 18:46:09 +08:00
|
|
|
defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv8f64, f512mem, v8i32, v8f64,
|
2014-01-14 15:41:20 +08:00
|
|
|
SSEPackedDouble>, EVEX_V512, PD, VEX_W,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
|
2014-01-05 18:46:09 +08:00
|
|
|
defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv16f32, f512mem, v16i32, v16f32,
|
2014-02-18 08:21:49 +08:00
|
|
|
SSEPackedSingle>, EVEX_V512, PS,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
|
2014-01-01 23:12:34 +08:00
|
|
|
// cvttps2udq (src, 0, mask-all-ones, sae-current)
|
|
|
|
def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),
|
|
|
|
(v16i32 immAllZerosV), (i16 -1), FROUND_CURRENT)),
|
|
|
|
(VCVTTPS2UDQZrr VR512:$src)>;
|
|
|
|
|
2014-01-05 18:46:09 +08:00
|
|
|
defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv8f64, f512mem, v8i32, v8f64,
|
2014-02-18 08:21:49 +08:00
|
|
|
SSEPackedDouble>, EVEX_V512, PS, VEX_W,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<64, CD8VF>;
|
|
|
|
|
2014-01-01 23:12:34 +08:00
|
|
|
// cvttpd2udq (src, 0, mask-all-ones, sae-current)
|
|
|
|
def : Pat<(v8i32 (int_x86_avx512_mask_cvttpd2udq_512 (v8f64 VR512:$src),
|
|
|
|
(v8i32 immAllZerosV), (i8 -1), FROUND_CURRENT)),
|
|
|
|
(VCVTTPD2UDQZrr VR512:$src)>;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
defm VCVTUDQ2PDZ : avx512_vcvt_fp<0x7A, "vcvtudq2pd", VR256X, VR512, uint_to_fp,
|
|
|
|
memopv4i64, f256mem, v8f64, v8i32,
|
|
|
|
SSEPackedDouble>, EVEX_V512, XS,
|
|
|
|
EVEX_CD8<32, CD8VH>;
|
|
|
|
|
2014-01-05 18:46:09 +08:00
|
|
|
defm VCVTUDQ2PSZ : avx512_vcvt_fp_with_rc<0x7A, "vcvtudq2ps", VR512, VR512, uint_to_fp,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv16i32, f512mem, v16f32, v16i32,
|
|
|
|
SSEPackedSingle>, EVEX_V512, XD,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
|
|
|
|
def : Pat<(v8i32 (fp_to_uint (v8f32 VR256X:$src1))),
|
|
|
|
(EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
|
|
|
|
(v16f32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
|
|
|
|
|
2014-04-08 15:24:02 +08:00
|
|
|
def : Pat<(v4i32 (fp_to_uint (v4f32 VR128X:$src1))),
|
|
|
|
(EXTRACT_SUBREG (v16i32 (VCVTTPS2UDQZrr
|
|
|
|
(v16f32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
|
|
|
|
|
|
|
|
def : Pat<(v8f32 (uint_to_fp (v8i32 VR256X:$src1))),
|
|
|
|
(EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR256X:$src1, sub_ymm)))), sub_ymm)>;
|
|
|
|
|
|
|
|
def : Pat<(v4f32 (uint_to_fp (v4i32 VR128X:$src1))),
|
|
|
|
(EXTRACT_SUBREG (v16f32 (VCVTUDQ2PSZrr
|
|
|
|
(v16i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_xmm)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-06-18 22:04:37 +08:00
|
|
|
def : Pat<(v4f64 (uint_to_fp (v4i32 VR128X:$src1))),
|
|
|
|
(EXTRACT_SUBREG (v8f64 (VCVTUDQ2PDZrr
|
|
|
|
(v8i32 (SUBREG_TO_REG (i32 0), VR128X:$src1, sub_xmm)))), sub_ymm)>;
|
|
|
|
|
2014-01-01 23:12:34 +08:00
|
|
|
def : Pat<(v16f32 (int_x86_avx512_mask_cvtdq2ps_512 (v16i32 VR512:$src),
|
2014-01-05 18:46:09 +08:00
|
|
|
(bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VCVTDQ2PSZrrb VR512:$src, imm:$rc)>;
|
2014-01-05 18:46:09 +08:00
|
|
|
def : Pat<(v8f64 (int_x86_avx512_mask_cvtdq2pd_512 (v8i32 VR256X:$src),
|
|
|
|
(bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VCVTDQ2PDZrr VR256X:$src)>;
|
|
|
|
def : Pat<(v16f32 (int_x86_avx512_mask_cvtudq2ps_512 (v16i32 VR512:$src),
|
|
|
|
(bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), imm:$rc)),
|
|
|
|
(VCVTUDQ2PSZrrb VR512:$src, imm:$rc)>;
|
|
|
|
def : Pat<(v8f64 (int_x86_avx512_mask_cvtudq2pd_512 (v8i32 VR256X:$src),
|
|
|
|
(bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VCVTUDQ2PDZrr VR256X:$src)>;
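
// FP-to-integer conversion helper: assembler-only rr/rrb/rm forms (no ISel
// patterns of their own); selection is handled by the intrinsic patterns that
// follow the instantiations below.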
multiclass avx512_vcvt_fp2int<bits<8> opc, string asm, RegisterClass SrcRC,
                              RegisterClass DstRC, PatFrag mem_frag,
                              X86MemOperand x86memop, Domain d> {
let hasSideEffects = 0 in {
  def rr : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [], d>, EVEX;
  def rrb : AVX512PI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src, AVX512RC:$rc),
              !strconcat(asm,"\t{$rc, $src, $dst|$dst, $src, $rc}"),
              [], d>, EVEX, EVEX_B, EVEX_RC;
  let mayLoad = 1 in
  def rm : AVX512PI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src),
              !strconcat(asm,"\t{$src, $dst|$dst, $src}"),
              [], d>, EVEX;
} // hasSideEffects = 0
}
|
|
|
|
|
|
|
|
defm VCVTPS2DQZ : avx512_vcvt_fp2int<0x5B, "vcvtps2dq", VR512, VR512,
|
2014-01-14 15:41:20 +08:00
|
|
|
memopv16f32, f512mem, SSEPackedSingle>, PD,
|
2014-01-01 23:12:34 +08:00
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm VCVTPD2DQZ : avx512_vcvt_fp2int<0xE6, "vcvtpd2dq", VR512, VR256X,
|
|
|
|
memopv8f64, f512mem, SSEPackedDouble>, XD, VEX_W,
|
|
|
|
EVEX_V512, EVEX_CD8<64, CD8VF>;
|
|
|
|
|
|
|
|
def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2dq_512 (v16f32 VR512:$src),
|
|
|
|
(v16i32 immAllZerosV), (i16 -1), imm:$rc)),
|
|
|
|
(VCVTPS2DQZrrb VR512:$src, imm:$rc)>;
|
|
|
|
|
|
|
|
def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),
|
|
|
|
(v8i32 immAllZerosV), (i8 -1), imm:$rc)),
|
|
|
|
(VCVTPD2DQZrrb VR512:$src, imm:$rc)>;
|
|
|
|
|
|
|
|
defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
|
|
|
|
memopv16f32, f512mem, SSEPackedSingle>,
|
2014-02-18 08:21:49 +08:00
|
|
|
PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
|
2014-01-01 23:12:34 +08:00
|
|
|
defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
|
|
|
|
memopv8f64, f512mem, SSEPackedDouble>, VEX_W,
|
2014-02-18 08:21:49 +08:00
|
|
|
PS, EVEX_V512, EVEX_CD8<64, CD8VF>;
|
2014-01-01 23:12:34 +08:00
|
|
|
|
|
|
|
def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
|
|
|
|
(v16i32 immAllZerosV), (i16 -1), imm:$rc)),
|
|
|
|
(VCVTPS2UDQZrrb VR512:$src, imm:$rc)>;
|
|
|
|
|
|
|
|
def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2udq_512 (v8f64 VR512:$src),
|
|
|
|
(v8i32 immAllZerosV), (i8 -1), imm:$rc)),
|
|
|
|
(VCVTPD2UDQZrrb VR512:$src, imm:$rc)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
let Predicates = [HasAVX512] in {
|
|
|
|
def : Pat<(v8f32 (fround (loadv8f64 addr:$src))),
|
|
|
|
(VCVTPD2PSZrm addr:$src)>;
|
|
|
|
def : Pat<(v8f64 (extloadv8f32 addr:$src)),
|
|
|
|
(VCVTPS2PDZrm addr:$src)>;
|
|
|
|
}

//===----------------------------------------------------------------------===//
// Half precision conversion instructions
//===----------------------------------------------------------------------===//
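
// Half-precision (f16) <-> single-precision conversions; instantiated below
// as the 512-bit VCVTPH2PSZ/VCVTPS2PHZ forms.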
multiclass avx512_cvtph2ps<RegisterClass destRC, RegisterClass srcRC,
                           X86MemOperand x86memop> {
  def rr : AVX5128I<0x13, MRMSrcReg, (outs destRC:$dst), (ins srcRC:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}",
             []>, EVEX;
  let hasSideEffects = 0, mayLoad = 1 in
  def rm : AVX5128I<0x13, MRMSrcMem, (outs destRC:$dst), (ins x86memop:$src),
             "vcvtph2ps\t{$src, $dst|$dst, $src}", []>, EVEX;
}

multiclass avx512_cvtps2ph<RegisterClass destRC, RegisterClass srcRC,
                           X86MemOperand x86memop> {
  def rr : AVX512AIi8<0x1D, MRMDestReg, (outs destRC:$dst),
             (ins srcRC:$src1, i32i8imm:$src2),
             "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}",
             []>, EVEX;
  let hasSideEffects = 0, mayStore = 1 in
  def mr : AVX512AIi8<0x1D, MRMDestMem, (outs),
             (ins x86memop:$dst, srcRC:$src1, i32i8imm:$src2),
             "vcvtps2ph\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, EVEX;
}
|
|
|
|
|
2014-02-05 15:05:03 +08:00
|
|
|
defm VCVTPH2PSZ : avx512_cvtph2ps<VR512, VR256X, f256mem>, EVEX_V512,
|
2013-10-24 15:16:35 +08:00
|
|
|
EVEX_CD8<32, CD8VH>;
|
2014-02-05 15:05:03 +08:00
|
|
|
defm VCVTPS2PHZ : avx512_cvtps2ph<VR256X, VR512, f256mem>, EVEX_V512,
|
2013-10-24 15:16:35 +08:00
|
|
|
EVEX_CD8<32, CD8VH>;
|
|
|
|
|
2014-02-05 15:05:03 +08:00
|
|
|
def : Pat<(v16i16 (int_x86_avx512_mask_vcvtps2ph_512 (v16f32 VR512:$src),
|
|
|
|
imm:$rc, (bc_v16i16(v8i32 immAllZerosV)), (i16 -1))),
|
|
|
|
(VCVTPS2PHZrr VR512:$src, imm:$rc)>;
|
|
|
|
|
|
|
|
def : Pat<(v16f32 (int_x86_avx512_mask_vcvtph2ps_512 (v16i16 VR256X:$src),
|
|
|
|
(bc_v16f32(v16i32 immAllZerosV)), (i16 -1), (i32 FROUND_CURRENT))),
|
|
|
|
(VCVTPH2PSZrr VR256X:$src)>;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
let Defs = [EFLAGS], Predicates = [HasAVX512] in {
|
|
|
|
defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
|
2014-02-18 08:21:49 +08:00
|
|
|
"ucomiss">, PS, EVEX, VEX_LIG,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
|
2014-01-14 15:41:20 +08:00
|
|
|
"ucomisd">, PD, EVEX,
|
2013-09-17 15:34:34 +08:00
|
|
|
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
let Pattern = []<dag> in {
|
|
|
|
defm VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
|
2014-02-18 08:21:49 +08:00
|
|
|
"comiss">, PS, EVEX, VEX_LIG,
|
2013-09-17 15:34:34 +08:00
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
|
2014-01-14 15:41:20 +08:00
|
|
|
"comisd">, PD, EVEX,
|
2013-09-17 15:34:34 +08:00
|
|
|
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
}
|
2014-01-03 01:28:14 +08:00
|
|
|
let isCodeGenOnly = 1 in {
|
|
|
|
defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
|
2014-02-18 08:21:49 +08:00
|
|
|
load, "ucomiss">, PS, EVEX, VEX_LIG,
|
2014-01-03 01:28:14 +08:00
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
|
2014-01-14 15:41:20 +08:00
|
|
|
load, "ucomisd">, PD, EVEX,
|
2014-01-03 01:28:14 +08:00
|
|
|
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
|
|
|
|
defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
|
2014-02-18 08:21:49 +08:00
|
|
|
load, "comiss">, PS, EVEX, VEX_LIG,
|
2014-01-03 01:28:14 +08:00
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
|
2014-01-14 15:41:20 +08:00
|
|
|
load, "comisd">, PD, EVEX,
|
2014-01-03 01:28:14 +08:00
|
|
|
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
2014-01-13 20:55:03 +08:00
|
|
|
/// avx512_fp14_s rcp14ss, rcp14sd, rsqrt14ss, rsqrt14sd
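/// The *14 approximation forms have a relative error on the order of 2^-14
/// (per Intel's documentation); they carry no ISel patterns of their own and
/// are selected through the rcp14/rsqrt14 intrinsic patterns below.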
|
|
|
|
multiclass avx512_fp14_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
|
|
|
|
X86MemOperand x86memop> {
|
2013-09-17 15:34:34 +08:00
|
|
|
let hasSideEffects = 0 in {
|
2014-01-13 20:55:03 +08:00
|
|
|
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2),
|
2013-09-17 15:34:34 +08:00
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
|
2013-09-17 15:34:34 +08:00
|
|
|
let mayLoad = 1 in {
|
2014-01-13 20:55:03 +08:00
|
|
|
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, x86memop:$src2),
|
2013-09-17 15:34:34 +08:00
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
2014-01-13 20:55:03 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VRCP14SS : avx512_fp14_s<0x4D, "vrcp14ss", FR32X, f32mem>,
|
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VRCP14SD : avx512_fp14_s<0x4D, "vrcp14sd", FR64X, f64mem>,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VRSQRT14SS : avx512_fp14_s<0x4F, "vrsqrt14ss", FR32X, f32mem>,
|
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VRSQRT14SD : avx512_fp14_s<0x4F, "vrsqrt14sd", FR64X, f64mem>,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
|
|
|
|
def : Pat <(v4f32 (int_x86_avx512_rcp14_ss (v4f32 VR128X:$src1),
|
|
|
|
(v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(COPY_TO_REGCLASS (VRCP14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
|
|
|
|
|
|
|
|
def : Pat <(v2f64 (int_x86_avx512_rcp14_sd (v2f64 VR128X:$src1),
|
|
|
|
(v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(COPY_TO_REGCLASS (VRCP14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
|
|
|
|
|
|
|
|
def : Pat <(v4f32 (int_x86_avx512_rsqrt14_ss (v4f32 VR128X:$src1),
|
|
|
|
(v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(COPY_TO_REGCLASS (VRSQRT14SSrr (COPY_TO_REGCLASS VR128X:$src1, FR32X),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
|
|
|
|
|
|
|
|
def : Pat <(v2f64 (int_x86_avx512_rsqrt14_sd (v2f64 VR128X:$src1),
|
|
|
|
(v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(COPY_TO_REGCLASS (VRSQRT14SDrr (COPY_TO_REGCLASS VR128X:$src1, FR64X),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
|
|
|
|
|
|
|
|
/// avx512_fp14_p rcp14ps, rcp14pd, rsqrt14ps, rsqrt14pd
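/// Packed forms are built on AVX512_maskable, so each record also picks up
/// merge-masking and zero-masking variants, plus a broadcast memory form (mb).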
|
|
|
|
multiclass avx512_fp14_p<bits<8> opc, string OpcodeStr, SDNode OpNode,
|
2014-10-29 00:37:13 +08:00
|
|
|
X86VectorVTInfo _> {
|
|
|
|
defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src), OpcodeStr, "$src", "$src",
|
|
|
|
(_.FloatVT (OpNode _.RC:$src))>, EVEX, T8PD;
|
|
|
|
let mayLoad = 1 in {
|
|
|
|
defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
|
|
|
(ins _.MemOp:$src), OpcodeStr, "$src", "$src",
|
|
|
|
(OpNode (_.FloatVT
|
|
|
|
(bitconvert (_.LdFrag addr:$src))))>, EVEX, T8PD;
|
|
|
|
defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
|
|
|
(ins _.ScalarMemOp:$src), OpcodeStr,
|
|
|
|
"${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
|
|
|
|
(OpNode (_.FloatVT
|
|
|
|
(X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
|
|
|
|
EVEX, T8PD, EVEX_B;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_fp14_p_vl_all<bits<8> opc, string OpcodeStr, SDNode OpNode> {
|
|
|
|
defm PSZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"), OpNode, v16f32_info>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PDZ : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"), OpNode, v8f64_info>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
|
|
|
|
// Define only if AVX512VL feature is present.
|
|
|
|
let Predicates = [HasVLX] in {
|
|
|
|
defm PSZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
|
|
|
|
OpNode, v4f32x_info>,
|
|
|
|
EVEX_V128, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PSZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "ps"),
|
|
|
|
OpNode, v8f32x_info>,
|
|
|
|
EVEX_V256, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PDZ128 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
|
|
|
|
OpNode, v2f64x_info>,
|
|
|
|
EVEX_V128, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
defm PDZ256 : avx512_fp14_p<opc, !strconcat(OpcodeStr, "pd"),
|
|
|
|
OpNode, v4f64x_info>,
|
|
|
|
EVEX_V256, VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VRSQRT14 : avx512_fp14_p_vl_all<0x4E, "vrsqrt14", X86frsqrt>;
|
|
|
|
defm VRCP14 : avx512_fp14_p_vl_all<0x4C, "vrcp14", X86frcp>;
|
2014-01-13 20:55:03 +08:00
|
|
|
|
|
|
|
def : Pat <(v16f32 (int_x86_avx512_rsqrt14_ps_512 (v16f32 VR512:$src),
|
|
|
|
(bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
|
|
|
|
(VRSQRT14PSZr VR512:$src)>;
|
|
|
|
def : Pat <(v8f64 (int_x86_avx512_rsqrt14_pd_512 (v8f64 VR512:$src),
|
|
|
|
(bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VRSQRT14PDZr VR512:$src)>;
|
|
|
|
|
|
|
|
def : Pat <(v16f32 (int_x86_avx512_rcp14_ps_512 (v16f32 VR512:$src),
|
|
|
|
(bc_v16f32 (v16i32 immAllZerosV)), (i16 -1))),
|
|
|
|
(VRCP14PSZr VR512:$src)>;
|
|
|
|
def : Pat <(v8f64 (int_x86_avx512_rcp14_pd_512 (v8f64 VR512:$src),
|
|
|
|
(bc_v8f64 (v16i32 immAllZerosV)), (i8 -1))),
|
|
|
|
(VRCP14PDZr VR512:$src)>;
|
|
|
|
|
|
|
|
/// avx512_fp28_s rcp28ss, rcp28sd, rsqrt28ss, rsqrt28sd
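/// AVX-512ER (HasERI) approximations with roughly 2^-28 relative error per
/// Intel's documentation. The rrb form encodes {sae} via EVEX.b.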
|
|
|
|
multiclass avx512_fp28_s<bits<8> opc, string OpcodeStr, RegisterClass RC,
|
|
|
|
X86MemOperand x86memop> {
|
|
|
|
let hasSideEffects = 0, Predicates = [HasERI] in {
|
|
|
|
def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
|
2014-01-13 20:55:03 +08:00
|
|
|
def rrb : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, RC:$src2),
|
2013-09-17 15:34:34 +08:00
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{{sae}, $src2, $src1, $dst|$dst, $src1, $src2, {sae}}"),
|
2014-01-13 20:55:03 +08:00
|
|
|
[]>, EVEX_4V, EVEX_B;
|
2013-09-17 15:34:34 +08:00
|
|
|
let mayLoad = 1 in {
|
2014-01-13 20:55:03 +08:00
|
|
|
def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
|
|
|
|
(ins RC:$src1, x86memop:$src2),
|
2013-09-17 15:34:34 +08:00
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, EVEX_4V;
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-01-13 20:55:03 +08:00
|
|
|
defm VRCP28SS : avx512_fp28_s<0xCB, "vrcp28ss", FR32X, f32mem>,
|
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VRCP28SD : avx512_fp28_s<0xCB, "vrcp28sd", FR64X, f64mem>,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VRSQRT28SS : avx512_fp28_s<0xCD, "vrsqrt28ss", FR32X, f32mem>,
|
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
defm VRSQRT28SD : avx512_fp28_s<0xCD, "vrsqrt28sd", FR64X, f64mem>,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
|
|
|
|
def : Pat <(v4f32 (int_x86_avx512_rcp28_ss (v4f32 VR128X:$src1),
|
|
|
|
(v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
|
|
|
|
FROUND_NO_EXC)),
|
|
|
|
(COPY_TO_REGCLASS (VRCP28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
|
|
|
|
|
|
|
|
def : Pat <(v2f64 (int_x86_avx512_rcp28_sd (v2f64 VR128X:$src1),
|
|
|
|
(v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
|
|
|
|
FROUND_NO_EXC)),
|
|
|
|
(COPY_TO_REGCLASS (VRCP28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
|
|
|
|
|
|
|
|
def : Pat <(v4f32 (int_x86_avx512_rsqrt28_ss (v4f32 VR128X:$src1),
|
|
|
|
(v4f32 VR128X:$src2), (bc_v4f32 (v4i32 immAllZerosV)), (i8 -1),
|
|
|
|
FROUND_NO_EXC)),
|
|
|
|
(COPY_TO_REGCLASS (VRSQRT28SSrrb (COPY_TO_REGCLASS VR128X:$src1, FR32X),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src2, FR32X)), VR128X)>;
|
|
|
|
|
|
|
|
def : Pat <(v2f64 (int_x86_avx512_rsqrt28_sd (v2f64 VR128X:$src1),
|
|
|
|
(v2f64 VR128X:$src2), (bc_v2f64 (v4i32 immAllZerosV)), (i8 -1),
|
|
|
|
FROUND_NO_EXC)),
|
|
|
|
(COPY_TO_REGCLASS (VRSQRT28SDrrb (COPY_TO_REGCLASS VR128X:$src1, FR64X),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src2, FR64X)), VR128X)>;
|
|
|
|
|
|
|
|
/// avx512_fp28_p rcp28ps, rcp28pd, rsqrt28ps, rsqrt28pd
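/// Packed ERI forms: a current-rounding register form, an {sae} (EVEX.b)
/// register form, and memory/broadcast forms, all masked via AVX512_maskable.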
|
2014-11-12 15:31:03 +08:00
|
|
|
|
|
|
|
multiclass avx512_fp28_p<bits<8> opc, string OpcodeStr, X86VectorVTInfo _,
|
|
|
|
SDNode OpNode> {
|
|
|
|
|
|
|
|
defm r : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src), OpcodeStr, "$src", "$src",
|
|
|
|
(OpNode (_.VT _.RC:$src), (i32 FROUND_CURRENT))>;
|
|
|
|
|
|
|
|
defm rb : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
|
|
|
|
(ins _.RC:$src), OpcodeStr,
|
|
|
|
"$src", "$src",
|
|
|
|
(OpNode (_.VT _.RC:$src), (i32 FROUND_NO_EXC)), "{sae}">, EVEX_B;
|
|
|
|
|
|
|
|
defm m : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
|
|
|
(ins _.MemOp:$src), OpcodeStr, "$src", "$src",
|
|
|
|
(OpNode (_.FloatVT
|
|
|
|
(bitconvert (_.LdFrag addr:$src))), (i32 FROUND_CURRENT))>;
|
|
|
|
|
|
|
|
defm mb : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
|
|
|
(ins _.MemOp:$src), OpcodeStr, "$src", "$src",
|
|
|
|
(OpNode (_.FloatVT
|
|
|
|
(X86VBroadcast (_.ScalarLdFrag addr:$src))),
|
|
|
|
(i32 FROUND_CURRENT))>, EVEX_B;
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_eri<bits<8> opc, string OpcodeStr, SDNode OpNode> {
|
|
|
|
defm PS : avx512_fp28_p<opc, OpcodeStr#"ps", v16f32_info, OpNode>,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PD : avx512_fp28_p<opc, OpcodeStr#"pd", v8f64_info, OpNode>,
|
|
|
|
VEX_W, EVEX_CD8<32, CD8VF>;
|
|
|
|
}
|
|
|
|
|
|
|
|
let Predicates = [HasERI], hasSideEffects = 0 in {
|
|
|
|
|
|
|
|
defm VRSQRT28 : avx512_eri<0xCC, "vrsqrt28", X86rsqrt28>, EVEX, EVEX_V512, T8PD;
|
|
|
|
defm VRCP28 : avx512_eri<0xCA, "vrcp28", X86rcp28>, EVEX, EVEX_V512, T8PD;
|
|
|
|
defm VEXP2 : avx512_eri<0xC8, "vexp2", X86exp2>, EVEX, EVEX_V512, T8PD;
|
2013-10-09 16:16:14 +08:00
|
|
|
}
|
2014-01-13 20:55:03 +08:00
|
|
|
|
2014-10-29 02:15:20 +08:00
|
|
|
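// Packed square root: register, memory, and broadcast-memory forms; masking
// variants come from AVX512_maskable.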
multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr,
|
|
|
|
SDNode OpNode, X86VectorVTInfo _>{
|
2014-10-29 02:22:41 +08:00
|
|
|
defm r: AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
|
2014-10-29 02:15:20 +08:00
|
|
|
(ins _.RC:$src), OpcodeStr, "$src", "$src",
|
|
|
|
(_.FloatVT (OpNode _.RC:$src))>, EVEX;
|
|
|
|
let mayLoad = 1 in {
|
2014-10-29 02:22:41 +08:00
|
|
|
defm m: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
2014-10-29 02:15:20 +08:00
|
|
|
(ins _.MemOp:$src), OpcodeStr, "$src", "$src",
|
|
|
|
(OpNode (_.FloatVT
|
|
|
|
(bitconvert (_.LdFrag addr:$src))))>, EVEX;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-10-29 02:22:41 +08:00
|
|
|
defm mb: AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
|
2014-10-29 02:15:20 +08:00
|
|
|
(ins _.ScalarMemOp:$src), OpcodeStr,
|
|
|
|
"${src}"##_.BroadcastStr, "${src}"##_.BroadcastStr,
|
|
|
|
(OpNode (_.FloatVT
|
|
|
|
(X86VBroadcast (_.ScalarLdFrag addr:$src))))>,
|
|
|
|
EVEX, EVEX_B;
|
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
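// Scalar square root: plain FR32X/FR64X forms plus _Int forms that operate on
// VR128X and match the F32Int/F64Int scalar intrinsics.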
multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
|
|
|
|
Intrinsic F32Int, Intrinsic F64Int,
|
|
|
|
OpndItins itins_s, OpndItins itins_d> {
|
|
|
|
def SSZr : SI<opc, MRMSrcReg, (outs FR32X:$dst),
|
|
|
|
(ins FR32X:$src1, FR32X:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2013-12-11 22:31:04 +08:00
|
|
|
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[], itins_s.rr>, XS, EVEX_4V;
|
2014-01-03 01:28:14 +08:00
|
|
|
let isCodeGenOnly = 1 in
|
2013-09-17 15:34:34 +08:00
|
|
|
def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
|
|
|
|
(ins VR128X:$src1, VR128X:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2013-12-11 22:31:04 +08:00
|
|
|
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set VR128X:$dst,
|
|
|
|
(F32Int VR128X:$src1, VR128X:$src2))],
|
|
|
|
itins_s.rr>, XS, EVEX_4V;
|
|
|
|
let mayLoad = 1 in {
|
|
|
|
def SSZm : SI<opc, MRMSrcMem, (outs FR32X:$dst),
|
|
|
|
(ins FR32X:$src1, f32mem:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2013-12-11 22:31:04 +08:00
|
|
|
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
|
2014-01-03 01:28:14 +08:00
|
|
|
let isCodeGenOnly = 1 in
|
2013-09-17 15:34:34 +08:00
|
|
|
def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
|
|
|
|
(ins VR128X:$src1, ssmem:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2013-12-11 22:31:04 +08:00
|
|
|
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set VR128X:$dst,
|
|
|
|
(F32Int VR128X:$src1, sse_load_f32:$src2))],
|
|
|
|
itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
|
|
|
|
}
|
|
|
|
def SDZr : SI<opc, MRMSrcReg, (outs FR64X:$dst),
|
|
|
|
(ins FR64X:$src1, FR64X:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2013-12-11 22:31:04 +08:00
|
|
|
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
|
2013-09-17 15:34:34 +08:00
|
|
|
XD, EVEX_4V, VEX_W;
|
2014-01-03 01:28:14 +08:00
|
|
|
let isCodeGenOnly = 1 in
|
2013-09-17 15:34:34 +08:00
|
|
|
def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
|
|
|
|
(ins VR128X:$src1, VR128X:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2013-12-11 22:31:04 +08:00
|
|
|
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set VR128X:$dst,
|
|
|
|
(F64Int VR128X:$src1, VR128X:$src2))],
|
|
|
|
itins_s.rr>, XD, EVEX_4V, VEX_W;
|
|
|
|
let mayLoad = 1 in {
|
|
|
|
def SDZm : SI<opc, MRMSrcMem, (outs FR64X:$dst),
|
|
|
|
(ins FR64X:$src1, f64mem:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2013-12-11 22:31:04 +08:00
|
|
|
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
|
2013-09-17 15:34:34 +08:00
|
|
|
XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
|
2014-01-03 01:28:14 +08:00
|
|
|
let isCodeGenOnly = 1 in
|
2013-09-17 15:34:34 +08:00
|
|
|
def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
|
|
|
|
(ins VR128X:$src1, sdmem:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2013-12-11 22:31:04 +08:00
|
|
|
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set VR128X:$dst,
|
|
|
|
(F64Int VR128X:$src1, sse_load_f64:$src2))]>,
|
|
|
|
XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-29 02:15:20 +08:00
|
|
|
multiclass avx512_sqrt_packed_all<bits<8> opc, string OpcodeStr,
|
|
|
|
SDNode OpNode> {
|
|
|
|
defm PSZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
|
|
|
|
v16f32_info>,
|
|
|
|
EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PDZ : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
|
|
|
|
v8f64_info>,
|
|
|
|
EVEX_V512, VEX_W, PD, EVEX_CD8<64, CD8VF>;
|
|
|
|
// Define only if AVX512VL feature is present.
|
|
|
|
let Predicates = [HasVLX] in {
|
|
|
|
defm PSZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
|
|
|
|
OpNode, v4f32x_info>,
|
|
|
|
EVEX_V128, PS, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PSZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "ps"),
|
|
|
|
OpNode, v8f32x_info>,
|
|
|
|
EVEX_V256, PS, EVEX_CD8<32, CD8VF>;
|
|
|
|
defm PDZ128 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
|
|
|
|
OpNode, v2f64x_info>,
|
|
|
|
EVEX_V128, VEX_W, PD, EVEX_CD8<64, CD8VF>;
|
|
|
|
defm PDZ256 : avx512_sqrt_packed<opc, !strconcat(OpcodeStr, "pd"),
|
|
|
|
OpNode, v4f64x_info>,
|
|
|
|
EVEX_V256, VEX_W, PD, EVEX_CD8<64, CD8VF>;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VSQRT : avx512_sqrt_packed_all<0x51, "vsqrt", fsqrt>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
defm VSQRT : avx512_sqrt_scalar<0x51, "sqrt",
|
|
|
|
int_x86_avx512_sqrt_ss, int_x86_avx512_sqrt_sd,
|
2014-10-29 02:15:20 +08:00
|
|
|
SSE_SQRTSS, SSE_SQRTSD>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2013-10-09 16:16:14 +08:00
|
|
|
let Predicates = [HasAVX512] in {
|
2014-07-22 19:07:31 +08:00
|
|
|
def : Pat<(v16f32 (int_x86_avx512_sqrt_ps_512 (v16f32 VR512:$src1),
|
|
|
|
(bc_v16f32 (v16i32 immAllZerosV)), (i16 -1), FROUND_CURRENT)),
|
2014-10-29 02:22:41 +08:00
|
|
|
(VSQRTPSZr VR512:$src1)>;
|
2014-07-22 19:07:31 +08:00
|
|
|
def : Pat<(v8f64 (int_x86_avx512_sqrt_pd_512 (v8f64 VR512:$src1),
|
|
|
|
(bc_v8f64 (v16i32 immAllZerosV)), (i8 -1), FROUND_CURRENT)),
|
2014-10-29 02:22:41 +08:00
|
|
|
(VSQRTPDZr VR512:$src1)>;
|
2014-07-22 19:07:31 +08:00
|
|
|
|
2013-10-09 16:16:14 +08:00
|
|
|
def : Pat<(f32 (fsqrt FR32X:$src)),
|
|
|
|
(VSQRTSSZr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
|
|
|
|
def : Pat<(f32 (fsqrt (load addr:$src))),
|
|
|
|
(VSQRTSSZm (f32 (IMPLICIT_DEF)), addr:$src)>,
|
|
|
|
Requires<[OptForSize]>;
|
|
|
|
def : Pat<(f64 (fsqrt FR64X:$src)),
|
|
|
|
(VSQRTSDZr (f64 (IMPLICIT_DEF)), FR64X:$src)>;
|
|
|
|
def : Pat<(f64 (fsqrt (load addr:$src))),
|
|
|
|
(VSQRTSDZm (f64 (IMPLICIT_DEF)), addr:$src)>,
|
|
|
|
Requires<[OptForSize]>;
|
|
|
|
|
|
|
|
def : Pat<(f32 (X86frsqrt FR32X:$src)),
|
2014-01-13 20:55:03 +08:00
|
|
|
(VRSQRT14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
|
2013-10-09 16:16:14 +08:00
|
|
|
def : Pat<(f32 (X86frsqrt (load addr:$src))),
|
2014-01-13 20:55:03 +08:00
|
|
|
(VRSQRT14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
|
2013-10-09 16:16:14 +08:00
|
|
|
Requires<[OptForSize]>;
|
|
|
|
|
|
|
|
def : Pat<(f32 (X86frcp FR32X:$src)),
|
2014-01-13 20:55:03 +08:00
|
|
|
(VRCP14SSrr (f32 (IMPLICIT_DEF)), FR32X:$src)>;
|
2013-10-09 16:16:14 +08:00
|
|
|
def : Pat<(f32 (X86frcp (load addr:$src))),
|
2014-01-13 20:55:03 +08:00
|
|
|
(VRCP14SSrm (f32 (IMPLICIT_DEF)), addr:$src)>,
|
2013-10-09 16:16:14 +08:00
|
|
|
Requires<[OptForSize]>;
|
|
|
|
|
|
|
|
def : Pat<(int_x86_sse_sqrt_ss VR128X:$src),
|
|
|
|
(COPY_TO_REGCLASS (VSQRTSSZr (f32 (IMPLICIT_DEF)),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src, FR32)),
|
|
|
|
VR128X)>;
|
|
|
|
def : Pat<(int_x86_sse_sqrt_ss sse_load_f32:$src),
|
|
|
|
(VSQRTSSZm_Int (v4f32 (IMPLICIT_DEF)), sse_load_f32:$src)>;
|
|
|
|
|
|
|
|
def : Pat<(int_x86_sse2_sqrt_sd VR128X:$src),
|
|
|
|
(COPY_TO_REGCLASS (VSQRTSDZr (f64 (IMPLICIT_DEF)),
|
|
|
|
(COPY_TO_REGCLASS VR128X:$src, FR64)),
|
|
|
|
VR128X)>;
|
|
|
|
def : Pat<(int_x86_sse2_sqrt_sd sse_load_f64:$src),
|
|
|
|
(VSQRTSDZm_Int (v2f64 (IMPLICIT_DEF)), sse_load_f64:$src)>;
|
|
|
|
}
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
multiclass avx512_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr,
|
|
|
|
X86MemOperand x86memop, RegisterClass RC,
|
|
|
|
PatFrag mem_frag32, PatFrag mem_frag64,
|
|
|
|
Intrinsic V4F32Int, Intrinsic V2F64Int,
|
|
|
|
CD8VForm VForm> {
|
|
|
|
let ExeDomain = SSEPackedSingle in {
|
|
|
|
// Intrinsic operation, reg.
|
|
|
|
// Vector intrinsic operation, reg
|
|
|
|
def PSr : AVX512AIi8<opcps, MRMSrcReg,
|
|
|
|
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
|
|
|
[(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>;
|
|
|
|
|
|
|
|
// Vector intrinsic operation, mem
|
|
|
|
def PSm : AVX512AIi8<opcps, MRMSrcMem,
|
|
|
|
(outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
|
|
|
[(set RC:$dst,
|
|
|
|
(V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>,
|
|
|
|
EVEX_CD8<32, VForm>;
|
|
|
|
} // ExeDomain = SSEPackedSingle
|
|
|
|
|
|
|
|
let ExeDomain = SSEPackedDouble in {
|
|
|
|
// Vector intrinsic operation, reg
|
|
|
|
def PDr : AVX512AIi8<opcpd, MRMSrcReg,
|
|
|
|
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
|
|
|
[(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>;
|
|
|
|
|
|
|
|
// Vector intrinsic operation, mem
|
|
|
|
def PDm : AVX512AIi8<opcpd, MRMSrcMem,
|
|
|
|
(outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
|
|
|
[(set RC:$dst,
|
|
|
|
(V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>,
|
|
|
|
EVEX_CD8<64, VForm>;
|
|
|
|
} // ExeDomain = SSEPackedDouble
|
|
|
|
}
|
|
|
|
|
|
|
|
multiclass avx512_fp_binop_rm<bits<8> opcss, bits<8> opcsd,
|
|
|
|
string OpcodeStr,
|
|
|
|
Intrinsic F32Int,
|
|
|
|
Intrinsic F64Int> {
|
|
|
|
let ExeDomain = GenericDomain in {
|
|
|
|
// Operation, reg.
|
|
|
|
let hasSideEffects = 0 in
|
|
|
|
def SSr : AVX512AIi8<opcss, MRMSrcReg,
|
|
|
|
(outs FR32X:$dst), (ins FR32X:$src1, FR32X:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
|
|
|
|
[]>;
|
|
|
|
|
|
|
|
// Intrinsic operation, reg.
|
2014-01-03 01:28:14 +08:00
|
|
|
let isCodeGenOnly = 1 in
|
2013-09-17 15:34:34 +08:00
|
|
|
def SSr_Int : AVX512AIi8<opcss, MRMSrcReg,
|
|
|
|
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
|
|
|
|
[(set VR128X:$dst, (F32Int VR128X:$src1, VR128X:$src2, imm:$src3))]>;
|
|
|
|
|
|
|
|
// Intrinsic operation, mem.
|
|
|
|
def SSm : AVX512AIi8<opcss, MRMSrcMem, (outs VR128X:$dst),
|
|
|
|
(ins VR128X:$src1, ssmem:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"ss\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
|
|
|
|
[(set VR128X:$dst, (F32Int VR128X:$src1,
|
|
|
|
sse_load_f32:$src2, imm:$src3))]>,
|
|
|
|
EVEX_CD8<32, CD8VT1>;
|
|
|
|
|
|
|
|
// Operation, reg.
|
|
|
|
let hasSideEffects = 0 in
|
|
|
|
def SDr : AVX512AIi8<opcsd, MRMSrcReg,
|
|
|
|
(outs FR64X:$dst), (ins FR64X:$src1, FR64X:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
|
|
|
|
[]>, VEX_W;
|
|
|
|
|
|
|
|
// Intrinsic operation, reg.
|
2014-01-03 01:28:14 +08:00
|
|
|
let isCodeGenOnly = 1 in
|
2013-09-17 15:34:34 +08:00
|
|
|
def SDr_Int : AVX512AIi8<opcsd, MRMSrcReg,
|
|
|
|
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
|
|
|
|
[(set VR128X:$dst, (F64Int VR128X:$src1, VR128X:$src2, imm:$src3))]>,
|
|
|
|
VEX_W;
|
|
|
|
|
|
|
|
// Intrinsic operation, mem.
|
|
|
|
def SDm : AVX512AIi8<opcsd, MRMSrcMem,
|
|
|
|
(outs VR128X:$dst), (ins VR128X:$src1, sdmem:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
|
|
|
"sd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
|
|
|
|
[(set VR128X:$dst,
|
|
|
|
(F64Int VR128X:$src1, sse_load_f64:$src2, imm:$src3))]>,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
} // ExeDomain = GenericDomain
|
|
|
|
}
|
|
|
|
|
2014-01-01 23:12:34 +08:00
|
|
|
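// Packed VRNDSCALE: the immediate selects the rounding behavior, which is how
// the ffloor/fceil/frint/ftrunc/fnearbyint patterns below are implemented.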
multiclass avx512_rndscale<bits<8> opc, string OpcodeStr,
|
|
|
|
X86MemOperand x86memop, RegisterClass RC,
|
|
|
|
PatFrag mem_frag, Domain d> {
|
|
|
|
let ExeDomain = d in {
|
|
|
|
// Intrinsic operation, reg.
|
|
|
|
// Vector intrinsic operation, reg
|
|
|
|
def r : AVX512AIi8<opc, MRMSrcReg,
|
|
|
|
(outs RC:$dst), (ins RC:$src1, i32i8imm:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2014-01-01 23:12:34 +08:00
|
|
|
[]>, EVEX;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
2014-01-01 23:12:34 +08:00
|
|
|
// Vector intrinsic operation, mem
|
|
|
|
def m : AVX512AIi8<opc, MRMSrcMem,
|
|
|
|
(outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2014-01-01 23:12:34 +08:00
|
|
|
[]>, EVEX;
|
|
|
|
} // ExeDomain
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
2014-01-01 23:12:34 +08:00
|
|
|
|
|
|
|
defm VRNDSCALEPSZ : avx512_rndscale<0x08, "vrndscaleps", f512mem, VR512,
|
|
|
|
memopv16f32, SSEPackedSingle>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VF>;
|
|
|
|
|
|
|
|
def : Pat<(v16f32 (int_x86_avx512_mask_rndscale_ps_512 (v16f32 VR512:$src1),
|
2014-05-04 21:35:37 +08:00
|
|
|
imm:$src2, (v16f32 VR512:$src1), (i16 -1),
|
2014-01-01 23:12:34 +08:00
|
|
|
FROUND_CURRENT)),
|
|
|
|
(VRNDSCALEPSZr VR512:$src1, imm:$src2)>;
|
|
|
|
|
|
|
|
|
|
|
|
defm VRNDSCALEPDZ : avx512_rndscale<0x09, "vrndscalepd", f512mem, VR512,
|
|
|
|
memopv8f64, SSEPackedDouble>, EVEX_V512,
|
|
|
|
VEX_W, EVEX_CD8<64, CD8VF>;
|
|
|
|
|
|
|
|
def : Pat<(v8f64 (int_x86_avx512_mask_rndscale_pd_512 (v8f64 VR512:$src1),
|
2014-05-04 21:35:37 +08:00
|
|
|
imm:$src2, (v8f64 VR512:$src1), (i8 -1),
|
2014-01-01 23:12:34 +08:00
|
|
|
FROUND_CURRENT)),
|
|
|
|
(VRNDSCALEPDZr VR512:$src1, imm:$src2)>;
|
|
|
|
|
|
|
|
multiclass avx512_rndscale_scalar<bits<8> opc, string OpcodeStr,
|
|
|
|
Operand x86memop, RegisterClass RC, Domain d> {
|
|
|
|
let ExeDomain = d in {
|
|
|
|
def r : AVX512AIi8<opc, MRMSrcReg,
|
|
|
|
(outs RC:$dst), (ins RC:$src1, RC:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2014-01-01 23:12:34 +08:00
|
|
|
[]>, EVEX_4V;
|
|
|
|
|
|
|
|
def m : AVX512AIi8<opc, MRMSrcMem,
|
|
|
|
(outs RC:$dst), (ins RC:$src1, x86memop:$src2, i32i8imm:$src3),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
|
2014-01-01 23:12:34 +08:00
|
|
|
[]>, EVEX_4V;
|
|
|
|
} // ExeDomain
|
|
|
|
}
|
|
|
|
|
|
|
|
defm VRNDSCALESS : avx512_rndscale_scalar<0x0A, "vrndscaless", ssmem, FR32X,
|
|
|
|
SSEPackedSingle>, EVEX_CD8<32, CD8VT1>;
|
|
|
|
|
|
|
|
defm VRNDSCALESD : avx512_rndscale_scalar<0x0B, "vrndscalesd", sdmem, FR64X,
|
|
|
|
SSEPackedDouble>, EVEX_CD8<64, CD8VT1>;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(ffloor FR32X:$src),
|
|
|
|
(VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x1))>;
|
|
|
|
def : Pat<(f64 (ffloor FR64X:$src)),
|
|
|
|
(VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x1))>;
|
|
|
|
def : Pat<(f32 (fnearbyint FR32X:$src)),
|
|
|
|
(VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0xC))>;
|
|
|
|
def : Pat<(f64 (fnearbyint FR64X:$src)),
|
|
|
|
(VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0xC))>;
|
|
|
|
def : Pat<(f32 (fceil FR32X:$src)),
|
|
|
|
(VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x2))>;
|
|
|
|
def : Pat<(f64 (fceil FR64X:$src)),
|
|
|
|
(VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x2))>;
|
|
|
|
def : Pat<(f32 (frint FR32X:$src)),
|
|
|
|
(VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x4))>;
|
|
|
|
def : Pat<(f64 (frint FR64X:$src)),
|
|
|
|
(VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x4))>;
|
|
|
|
def : Pat<(f32 (ftrunc FR32X:$src)),
|
|
|
|
(VRNDSCALESSr (f32 (IMPLICIT_DEF)), FR32X:$src, (i32 0x3))>;
|
|
|
|
def : Pat<(f64 (ftrunc FR64X:$src)),
|
|
|
|
(VRNDSCALESDr (f64 (IMPLICIT_DEF)), FR64X:$src, (i32 0x3))>;
|
|
|
|
|
|
|
|
def : Pat<(v16f32 (ffloor VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPSZr VR512:$src, (i32 0x1))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v16f32 (fnearbyint VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPSZr VR512:$src, (i32 0xC))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v16f32 (fceil VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPSZr VR512:$src, (i32 0x2))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v16f32 (frint VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPSZr VR512:$src, (i32 0x4))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v16f32 (ftrunc VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPSZr VR512:$src, (i32 0x3))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
def : Pat<(v8f64 (ffloor VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPDZr VR512:$src, (i32 0x1))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v8f64 (fnearbyint VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPDZr VR512:$src, (i32 0xC))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v8f64 (fceil VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPDZr VR512:$src, (i32 0x2))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v8f64 (frint VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPDZr VR512:$src, (i32 0x4))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v8f64 (ftrunc VR512:$src)),
|
2014-01-01 23:12:34 +08:00
|
|
|
(VRNDSCALEPDZr VR512:$src, (i32 0x3))>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
//-------------------------------------------------
|
|
|
|
// Integer truncate and extend operations
|
|
|
|
//-------------------------------------------------
|
|
|
|
|
|
|
|
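// VPMOV* down-conversions (truncate and saturating truncate) from 512-bit
// integer vectors, with plain, merge-masked (rrk), zero-masked (rrkz), and
// memory (mr/mrk) forms.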
multiclass avx512_trunc_sat<bits<8> opc, string OpcodeStr,
|
|
|
|
RegisterClass dstRC, RegisterClass srcRC,
|
|
|
|
RegisterClass KRC, X86MemOperand x86memop> {
|
|
|
|
def rr : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
|
|
|
|
(ins srcRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[]>, EVEX;
|
|
|
|
|
2014-04-22 19:36:19 +08:00
|
|
|
def rrk : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
|
|
|
|
(ins KRC:$mask, srcRC:$src),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src, ${dst} {${mask}}|${dst} {${mask}}, $src}"),
|
2014-04-22 19:36:19 +08:00
|
|
|
[]>, EVEX, EVEX_K;
|
|
|
|
|
|
|
|
def rrkz : AVX512XS8I<opc, MRMDestReg, (outs dstRC:$dst),
|
2013-09-17 15:34:34 +08:00
|
|
|
(ins KRC:$mask, srcRC:$src),
|
|
|
|
!strconcat(OpcodeStr,
|
2014-11-26 04:11:23 +08:00
|
|
|
"\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[]>, EVEX, EVEX_KZ;
|
|
|
|
|
|
|
|
def mr : AVX512XS8I<opc, MRMDestMem, (outs), (ins x86memop:$dst, srcRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[]>, EVEX;
|
2014-04-22 19:36:19 +08:00
|
|
|
|
|
|
|
def mrk : AVX512XS8I<opc, MRMDestMem, (outs),
|
|
|
|
(ins x86memop:$dst, KRC:$mask, srcRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst {${mask}}|${dst} {${mask}}, $src}"),
|
2014-04-22 19:36:19 +08:00
|
|
|
[]>, EVEX, EVEX_K;
|
|
|
|
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
defm VPMOVQB : avx512_trunc_sat<0x32, "vpmovqb", VR128X, VR512, VK8WM,
|
|
|
|
i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
|
|
|
|
defm VPMOVSQB : avx512_trunc_sat<0x22, "vpmovsqb", VR128X, VR512, VK8WM,
|
|
|
|
i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
|
|
|
|
defm VPMOVUSQB : avx512_trunc_sat<0x12, "vpmovusqb", VR128X, VR512, VK8WM,
|
|
|
|
i128mem>, EVEX_V512, EVEX_CD8<8, CD8VO>;
|
|
|
|
defm VPMOVQW : avx512_trunc_sat<0x34, "vpmovqw", VR128X, VR512, VK8WM,
|
|
|
|
i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
|
|
|
|
defm VPMOVSQW : avx512_trunc_sat<0x24, "vpmovsqw", VR128X, VR512, VK8WM,
|
|
|
|
i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
|
|
|
|
defm VPMOVUSQW : avx512_trunc_sat<0x14, "vpmovusqw", VR128X, VR512, VK8WM,
|
|
|
|
i128mem>, EVEX_V512, EVEX_CD8<16, CD8VQ>;
|
|
|
|
defm VPMOVQD : avx512_trunc_sat<0x35, "vpmovqd", VR256X, VR512, VK8WM,
|
|
|
|
i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
|
|
|
|
defm VPMOVSQD : avx512_trunc_sat<0x25, "vpmovsqd", VR256X, VR512, VK8WM,
|
|
|
|
i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
|
|
|
|
defm VPMOVUSQD : avx512_trunc_sat<0x15, "vpmovusqd", VR256X, VR512, VK8WM,
|
|
|
|
i256mem>, EVEX_V512, EVEX_CD8<32, CD8VH>;
|
|
|
|
defm VPMOVDW : avx512_trunc_sat<0x33, "vpmovdw", VR256X, VR512, VK16WM,
|
|
|
|
i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
|
|
|
|
defm VPMOVSDW : avx512_trunc_sat<0x23, "vpmovsdw", VR256X, VR512, VK16WM,
|
|
|
|
i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
|
|
|
|
defm VPMOVUSDW : avx512_trunc_sat<0x13, "vpmovusdw", VR256X, VR512, VK16WM,
|
|
|
|
i256mem>, EVEX_V512, EVEX_CD8<16, CD8VH>;
|
|
|
|
defm VPMOVDB : avx512_trunc_sat<0x31, "vpmovdb", VR128X, VR512, VK16WM,
|
|
|
|
i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
|
|
|
|
defm VPMOVSDB : avx512_trunc_sat<0x21, "vpmovsdb", VR128X, VR512, VK16WM,
|
|
|
|
i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
|
|
|
|
defm VPMOVUSDB : avx512_trunc_sat<0x11, "vpmovusdb", VR128X, VR512, VK16WM,
|
|
|
|
i128mem>, EVEX_V512, EVEX_CD8<8, CD8VQ>;
|
|
|
|
|
|
|
|
def : Pat<(v16i8 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQBrr VR512:$src)>;
|
|
|
|
def : Pat<(v8i16 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQWrr VR512:$src)>;
|
|
|
|
def : Pat<(v16i16 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDWrr VR512:$src)>;
|
|
|
|
def : Pat<(v16i8 (X86vtrunc (v16i32 VR512:$src))), (VPMOVDBrr VR512:$src)>;
|
|
|
|
def : Pat<(v8i32 (X86vtrunc (v8i64 VR512:$src))), (VPMOVQDrr VR512:$src)>;
|
|
|
|
|
|
|
|
def : Pat<(v16i8 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
|
2014-04-22 19:36:19 +08:00
|
|
|
(VPMOVDBrrkz VK16WM:$mask, VR512:$src)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v16i16 (X86vtruncm VK16WM:$mask, (v16i32 VR512:$src))),
|
2014-04-22 19:36:19 +08:00
|
|
|
(VPMOVDWrrkz VK16WM:$mask, VR512:$src)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v8i16 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
|
2014-04-22 19:36:19 +08:00
|
|
|
(VPMOVQWrrkz VK8WM:$mask, VR512:$src)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
def : Pat<(v8i32 (X86vtruncm VK8WM:$mask, (v8i64 VR512:$src))),
|
2014-04-22 19:36:19 +08:00
|
|
|
(VPMOVQDrrkz VK8WM:$mask, VR512:$src)>;
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
|
2014-04-22 19:36:19 +08:00
|
|
|
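// Sign/zero extension from 128/256-bit sources to 512-bit destinations, with
// masked register and memory forms.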
multiclass avx512_extend<bits<8> opc, string OpcodeStr, RegisterClass KRC,
|
|
|
|
RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode,
|
|
|
|
PatFrag mem_frag, X86MemOperand x86memop,
|
|
|
|
ValueType OpVT, ValueType InVT> {
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
def rr : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
|
|
|
|
(ins SrcRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set DstRC:$dst, (OpVT (OpNode (InVT SrcRC:$src))))]>, EVEX;
|
2014-04-22 19:36:19 +08:00
|
|
|
|
|
|
|
def rrk : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
|
|
|
|
(ins KRC:$mask, SrcRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
|
2014-04-22 19:36:19 +08:00
|
|
|
[]>, EVEX, EVEX_K;
|
|
|
|
|
|
|
|
def rrkz : AVX5128I<opc, MRMSrcReg, (outs DstRC:$dst),
|
|
|
|
(ins KRC:$mask, SrcRC:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr, "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
|
2014-04-22 19:36:19 +08:00
|
|
|
[]>, EVEX, EVEX_KZ;
|
|
|
|
|
|
|
|
let mayLoad = 1 in {
|
|
|
|
def rm : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
|
2013-09-17 15:34:34 +08:00
|
|
|
(ins x86memop:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr,"\t{$src, $dst|$dst, $src}"),
|
2013-09-17 15:34:34 +08:00
|
|
|
[(set DstRC:$dst,
|
|
|
|
(OpVT (OpNode (InVT (bitconvert (mem_frag addr:$src))))))]>,
|
|
|
|
EVEX;
|
2014-04-22 19:36:19 +08:00
|
|
|
|
|
|
|
def rmk : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
|
|
|
|
(ins KRC:$mask, x86memop:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr,"\t{$src, $dst {${mask}} |$dst {${mask}}, $src}"),
|
2014-04-22 19:36:19 +08:00
|
|
|
[]>,
|
|
|
|
EVEX, EVEX_K;
|
|
|
|
|
|
|
|
def rmkz : AVX5128I<opc, MRMSrcMem, (outs DstRC:$dst),
|
|
|
|
(ins KRC:$mask, x86memop:$src),
|
2014-11-26 04:11:23 +08:00
|
|
|
!strconcat(OpcodeStr,"\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
|
2014-04-22 19:36:19 +08:00
|
|
|
[]>,
|
|
|
|
EVEX, EVEX_KZ;
|
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
}
|
|
|
|
|
2014-04-22 19:36:19 +08:00
|
|
|
defm VPMOVZXBDZ: avx512_extend<0x31, "vpmovzxbd", VK16WM, VR512, VR128X, X86vzext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
|
|
|
|
EVEX_CD8<8, CD8VQ>;
|
2014-04-22 19:36:19 +08:00
|
|
|
defm VPMOVZXBQZ: avx512_extend<0x32, "vpmovzxbq", VK8WM, VR512, VR128X, X86vzext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
|
|
|
|
EVEX_CD8<8, CD8VO>;
|
2014-04-22 19:36:19 +08:00
|
|
|
defm VPMOVZXWDZ: avx512_extend<0x33, "vpmovzxwd", VK16WM, VR512, VR256X, X86vzext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
|
|
|
|
EVEX_CD8<16, CD8VH>;
|
2014-04-22 19:36:19 +08:00
|
|
|
defm VPMOVZXWQZ: avx512_extend<0x34, "vpmovzxwq", VK8WM, VR512, VR128X, X86vzext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
|
|
|
|
EVEX_CD8<16, CD8VQ>;
|
2014-04-22 19:36:19 +08:00
|
|
|
defm VPMOVZXDQZ: avx512_extend<0x35, "vpmovzxdq", VK8WM, VR512, VR256X, X86vzext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VH>;
|
2014-04-22 19:36:19 +08:00
|
|
|
|
|
|
|
defm VPMOVSXBDZ: avx512_extend<0x21, "vpmovsxbd", VK16WM, VR512, VR128X, X86vsext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv2i64, i128mem, v16i32, v16i8>, EVEX_V512,
|
|
|
|
EVEX_CD8<8, CD8VQ>;
|
2014-04-22 19:36:19 +08:00
|
|
|
defm VPMOVSXBQZ: avx512_extend<0x22, "vpmovsxbq", VK8WM, VR512, VR128X, X86vsext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv2i64, i128mem, v8i64, v16i8>, EVEX_V512,
|
|
|
|
EVEX_CD8<8, CD8VO>;
|
2014-04-22 19:36:19 +08:00
|
|
|
defm VPMOVSXWDZ: avx512_extend<0x23, "vpmovsxwd", VK16WM, VR512, VR256X, X86vsext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv4i64, i256mem, v16i32, v16i16>, EVEX_V512,
|
|
|
|
EVEX_CD8<16, CD8VH>;
|
2014-04-22 19:36:19 +08:00
|
|
|
defm VPMOVSXWQZ: avx512_extend<0x24, "vpmovsxwq", VK8WM, VR512, VR128X, X86vsext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv2i64, i128mem, v8i64, v8i16>, EVEX_V512,
|
|
|
|
EVEX_CD8<16, CD8VQ>;
|
2014-04-22 19:36:19 +08:00
|
|
|
defm VPMOVSXDQZ: avx512_extend<0x25, "vpmovsxdq", VK8WM, VR512, VR256X, X86vsext,
|
2013-09-17 15:34:34 +08:00
|
|
|
memopv4i64, i256mem, v8i64, v8i32>, EVEX_V512,
|
|
|
|
EVEX_CD8<32, CD8VH>;

//===----------------------------------------------------------------------===//
// GATHER - SCATTER Operations

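// Gathers: masked vector loads through a vector of indices. The mask is both
// read and written back ($mask_wb), since the hardware clears mask bits as
// elements are loaded; the destination is tied to $src1 and @earlyclobber.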
multiclass avx512_gather<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                         RegisterClass RC, X86MemOperand memop> {
  let mayLoad = 1,
      Constraints = "@earlyclobber $dst, $src1 = $dst, $mask = $mask_wb" in
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst, KRC:$mask_wb),
            (ins RC:$src1, KRC:$mask, memop:$src2),
            !strconcat(OpcodeStr,
            "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
            []>, EVEX, EVEX_K;
}
|
2014-03-26 21:50:50 +08:00
|
|
|
|
|
|
|
let ExeDomain = SSEPackedDouble in {
|
2013-09-17 15:34:34 +08:00
|
|
|
defm VGATHERDPDZ : avx512_gather<0x92, "vgatherdpd", VK8WM, VR512, vy64xmem>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VGATHERQPDZ : avx512_gather<0x93, "vgatherqpd", VK8WM, VR512, vz64mem>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
|
2014-03-26 21:50:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
let ExeDomain = SSEPackedSingle in {
|
|
|
|
defm VGATHERDPSZ : avx512_gather<0x92, "vgatherdps", VK16WM, VR512, vz32mem>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VT1>;
|
2013-09-17 15:34:34 +08:00
|
|
|
defm VGATHERQPSZ : avx512_gather<0x93, "vgatherqps", VK8WM, VR256X, vz64mem>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VT1>;
|
2014-03-26 21:50:50 +08:00
|
|
|
}
|
2013-09-17 15:34:34 +08:00
|
|
|
|
|
|
|
defm VPGATHERDQZ : avx512_gather<0x90, "vpgatherdq", VK8WM, VR512, vy64xmem>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VPGATHERDDZ : avx512_gather<0x90, "vpgatherdd", VK16WM, VR512, vz32mem>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VT1>;
|
|
|
|
|
|
|
|
defm VPGATHERQQZ : avx512_gather<0x91, "vpgatherqq", VK8WM, VR512, vz64mem>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VPGATHERQDZ : avx512_gather<0x91, "vpgatherqd", VK8WM, VR256X, vz64mem>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VT1>;
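
// Scatters: masked vector stores through a vector of indices; as with the
// gathers above, the mask is written back ($mask_wb) as elements complete.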
multiclass avx512_scatter<bits<8> opc, string OpcodeStr, RegisterClass KRC,
                          RegisterClass RC, X86MemOperand memop> {
  let mayStore = 1, Constraints = "$mask = $mask_wb" in
  def mr : AVX5128I<opc, MRMDestMem, (outs KRC:$mask_wb),
            (ins memop:$dst, KRC:$mask, RC:$src2),
            !strconcat(OpcodeStr,
            "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
            []>, EVEX, EVEX_K;
}
|
|
|
|
|
2014-03-26 21:50:50 +08:00
|
|
|
let ExeDomain = SSEPackedDouble in {
|
2013-09-17 15:34:34 +08:00
|
|
|
defm VSCATTERDPDZ : avx512_scatter<0xA2, "vscatterdpd", VK8WM, VR512, vy64xmem>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
|
|
|
|
defm VSCATTERQPDZ : avx512_scatter<0xA3, "vscatterqpd", VK8WM, VR512, vz64mem>,
|
|
|
|
EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
|
2014-03-26 21:50:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
let ExeDomain = SSEPackedSingle in {
|
|
|
|
defm VSCATTERDPSZ : avx512_scatter<0xA2, "vscatterdps", VK16WM, VR512, vz32mem>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VT1>;
|
2013-09-17 15:34:34 +08:00
|
|
|
defm VSCATTERQPSZ : avx512_scatter<0xA3, "vscatterqps", VK8WM, VR256X, vz64mem>,
|
|
|
|
EVEX_V512, EVEX_CD8<32, CD8VT1>;
|
2014-03-26 21:50:50 +08:00
|
|
|
}

defm VPSCATTERDQZ : avx512_scatter<0xA0, "vpscatterdq", VK8WM, VR512, vy64xmem>,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VPSCATTERDDZ : avx512_scatter<0xA0, "vpscatterdd", VK16WM, VR512, vz32mem>,
                                   EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VPSCATTERQQZ : avx512_scatter<0xA1, "vpscatterqq", VK8WM, VR512, vz64mem>,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;
defm VPSCATTERQDZ : avx512_scatter<0xA1, "vpscatterqd", VK8WM, VR256X, vz64mem>,
                                   EVEX_V512, EVEX_CD8<32, CD8VT1>;

// prefetch
multiclass avx512_gather_scatter_prefetch<bits<8> opc, Format F, string OpcodeStr,
                                          RegisterClass KRC, X86MemOperand memop> {
  let Predicates = [HasPFI], hasSideEffects = 1 in
  def m  : AVX5128I<opc, F, (outs), (ins KRC:$mask, memop:$src),
           !strconcat(OpcodeStr, "\t{$src {${mask}}|{${mask}}, $src}"),
           []>, EVEX, EVEX_K;
}
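
// Note (informal): these prefetch forms take no destination register; they
// only hint the cache under the mask. The Format argument selects the ModRM
// reg-field opcode extension (MRM1m/MRM2m for the gather-prefetch hint levels
// 0/1, MRM5m/MRM6m for the scatter variants below), which is how the eight
// mnemonics share the two opcodes 0xC6 and 0xC7.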

defm VGATHERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dps",
                    VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qps",
                    VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VGATHERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM1m, "vgatherpf0dpd",
                    VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM1m, "vgatherpf0qpd",
                    VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VGATHERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dps",
                    VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qps",
                    VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VGATHERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM2m, "vgatherpf1dpd",
                    VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VGATHERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM2m, "vgatherpf1qpd",
                    VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF0DPS: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF0QPS: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF0DPD: avx512_gather_scatter_prefetch<0xC6, MRM5m, "vscatterpf0dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF0QPD: avx512_gather_scatter_prefetch<0xC7, MRM5m, "vscatterpf0qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF1DPS: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dps",
                     VK16WM, vz32mem>, EVEX_V512, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF1QPS: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qps",
                     VK8WM, vz64mem>, EVEX_V512, EVEX_CD8<64, CD8VT1>;

defm VSCATTERPF1DPD: avx512_gather_scatter_prefetch<0xC6, MRM6m, "vscatterpf1dpd",
                     VK8WM, vy32mem>, EVEX_V512, VEX_W, EVEX_CD8<32, CD8VT1>;

defm VSCATTERPF1QPD: avx512_gather_scatter_prefetch<0xC7, MRM6m, "vscatterpf1qpd",
                     VK8WM, vz64mem>, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VT1>;

//===----------------------------------------------------------------------===//
// VSHUFPS - VSHUFPD Operations

multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
                        ValueType vt, string OpcodeStr, PatFrag mem_frag,
                        Domain d> {
  def rmi : AVX512PIi8<0xC6, MRMSrcMem, (outs RC:$dst),
                   (ins RC:$src1, x86memop:$src2, i8imm:$src3),
                   !strconcat(OpcodeStr,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                   [(set RC:$dst, (vt (X86Shufp RC:$src1, (mem_frag addr:$src2),
                                       (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
                   EVEX_4V, Sched<[WriteShuffleLd, ReadAfterLd]>;
  def rri : AVX512PIi8<0xC6, MRMSrcReg, (outs RC:$dst),
                   (ins RC:$src1, RC:$src2, i8imm:$src3),
                   !strconcat(OpcodeStr,
                   "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
                   [(set RC:$dst, (vt (X86Shufp RC:$src1, RC:$src2,
                                       (i8 imm:$src3))))], d, IIC_SSE_SHUFP>,
                   EVEX_4V, Sched<[WriteShuffle]>;
}
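
// Note (informal): as with the SSE/AVX forms, the 8-bit immediate selects
// elements per 128-bit lane: for vshufps, two elements from $src1 and two
// from $src2 per lane; for vshufpd, one selector bit per element.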

defm VSHUFPSZ : avx512_shufp<VR512, f512mem, v16f32, "vshufps", memopv16f32,
                  SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VSHUFPDZ : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", memopv8f64,
                  SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;

def : Pat<(v16i32 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VSHUFPSZrri VR512:$src1, VR512:$src2, imm:$imm)>;
def : Pat<(v16i32 (X86Shufp VR512:$src1,
                    (memopv16i32 addr:$src2), (i8 imm:$imm))),
          (VSHUFPSZrmi VR512:$src1, addr:$src2, imm:$imm)>;

def : Pat<(v8i64 (X86Shufp VR512:$src1, VR512:$src2, (i8 imm:$imm))),
          (VSHUFPDZrri VR512:$src1, VR512:$src2, imm:$imm)>;
def : Pat<(v8i64 (X86Shufp VR512:$src1,
                   (memopv8i64 addr:$src2), (i8 imm:$imm))),
          (VSHUFPDZrmi VR512:$src1, addr:$src2, imm:$imm)>;
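
// Note (informal): the integer-typed X86Shufp patterns above intentionally
// reuse the floating-point shuffle instructions; the shuffle only rearranges
// bits, so the instruction choice affects only the execution domain.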

multiclass avx512_valign<X86VectorVTInfo _> {
  defm rri : AVX512_maskable<0x03, MRMSrcReg, _, (outs _.RC:$dst),
                     (ins _.RC:$src1, _.RC:$src2, i8imm:$src3),
                     "valign"##_.Suffix,
                     "$src3, $src2, $src1", "$src1, $src2, $src3",
                     (_.VT (X86VAlign _.RC:$src2, _.RC:$src1,
                                      (i8 imm:$src3)))>,
             AVX512AIi8Base, EVEX_4V;

  // Also match valign of packed floats.
  def : Pat<(_.FloatVT (X86VAlign _.RC:$src1, _.RC:$src2, (i8 imm:$imm))),
            (!cast<Instruction>(NAME##rri) _.RC:$src2, _.RC:$src1, imm:$imm)>;

  let mayLoad = 1 in
  def rmi : AVX512AIi8<0x03, MRMSrcMem, (outs _.RC:$dst),
                       (ins _.RC:$src1, _.MemOp:$src2, i8imm:$src3),
                       !strconcat("valign"##_.Suffix,
                       "\t{$src3, $src2, $src1, $dst|"
                           "$dst, $src1, $src2, $src3}"),
                       []>, EVEX_4V;
}

defm VALIGND : avx512_valign<v16i32_info>, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VALIGNQ : avx512_valign<v8i64_info>, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
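
// Note (informal): valignd/valignq concatenate the two sources and shift the
// concatenation right by imm elements, keeping the low VLEN bits. The patterns
// above swap $src1/$src2 because the X86VAlign DAG node takes its operands in
// the opposite order from the instruction's assembly operands.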

// Helper fragments to match sext vXi1 to vXiY.
def v16i1sextv16i32 : PatLeaf<(v16i32 (X86vsrai VR512:$src, (i8 31)))>;
def v8i1sextv8i64 : PatLeaf<(v8i64 (X86vsrai VR512:$src, (i8 63)))>;
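
// Note (informal): an arithmetic shift right by (element size - 1) replicates
// each element's sign bit across the whole element, so these leaves recognize
// the usual "sign-extended vXi1" idiom (all-ones for negative lanes, all-zeros
// otherwise) that the vpabs patterns below rely on.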

multiclass avx512_vpabs<bits<8> opc, string OpcodeStr, ValueType OpVT,
                        RegisterClass KRC, RegisterClass RC,
                        X86MemOperand x86memop, X86MemOperand x86scalar_mop,
                        string BrdcstStr> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src),
            !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
            []>, EVEX;
  def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
            !strconcat(OpcodeStr, "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
            []>, EVEX, EVEX_K;
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst), (ins KRC:$mask, RC:$src),
             !strconcat(OpcodeStr,
                        "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
             []>, EVEX, EVEX_KZ;

  let mayLoad = 1 in {
    def rm : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
              (ins x86memop:$src),
              !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"),
              []>, EVEX;
    def rmk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
              (ins KRC:$mask, x86memop:$src),
              !strconcat(OpcodeStr,
                         "\t{$src, $dst {${mask}}|$dst {${mask}}, $src}"),
              []>, EVEX, EVEX_K;
    def rmkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
               (ins KRC:$mask, x86memop:$src),
               !strconcat(OpcodeStr,
                          "\t{$src, $dst {${mask}} {z}|$dst {${mask}} {z}, $src}"),
               []>, EVEX, EVEX_KZ;
    def rmb : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
              (ins x86scalar_mop:$src),
              !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                         ", $dst|$dst, ${src}", BrdcstStr, "}"),
              []>, EVEX, EVEX_B;
    def rmbk : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
               (ins KRC:$mask, x86scalar_mop:$src),
               !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                          ", $dst {${mask}}|$dst {${mask}}, ${src}", BrdcstStr, "}"),
               []>, EVEX, EVEX_B, EVEX_K;
    def rmbkz : AVX5128I<opc, MRMSrcMem, (outs VR512:$dst),
                (ins KRC:$mask, x86scalar_mop:$src),
                !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                           ", $dst {${mask}} {z}|$dst {${mask}} {z}, ${src}",
                           BrdcstStr, "}"),
                []>, EVEX, EVEX_B, EVEX_KZ;
  }
}

defm VPABSDZ : avx512_vpabs<0x1E, "vpabsd", v16i32, VK16WM, VR512,
                            i512mem, i32mem, "{1to16}">, EVEX_V512,
                            EVEX_CD8<32, CD8VF>;
defm VPABSQZ : avx512_vpabs<0x1F, "vpabsq", v8i64, VK8WM, VR512,
                            i512mem, i64mem, "{1to8}">, EVEX_V512, VEX_W,
                            EVEX_CD8<64, CD8VF>;

def : Pat<(xor
          (bc_v16i32 (v16i1sextv16i32)),
          (bc_v16i32 (add (v16i32 VR512:$src), (v16i1sextv16i32)))),
          (VPABSDZrr VR512:$src)>;
def : Pat<(xor
          (bc_v8i64 (v8i1sextv8i64)),
          (bc_v8i64 (add (v8i64 VR512:$src), (v8i1sextv8i64)))),
          (VPABSQZrr VR512:$src)>;
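
// Note (informal): these match the branch-free absolute-value expansion
// abs(x) = (x + s) ^ s with s = x >>(arith) (bits - 1). For x = -5: s = -1,
// (x + s) ^ s = (-6) ^ -1 = 5. The patterns fold that idiom back into
// vpabsd/vpabsq.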

def : Pat<(v16i32 (int_x86_avx512_mask_pabs_d_512 (v16i32 VR512:$src),
                   (v16i32 immAllZerosV), (i16 -1))),
          (VPABSDZrr VR512:$src)>;
def : Pat<(v8i64 (int_x86_avx512_mask_pabs_q_512 (v8i64 VR512:$src),
                   (bc_v8i64 (v16i32 immAllZerosV)), (i8 -1))),
          (VPABSQZrr VR512:$src)>;

multiclass avx512_conflict<bits<8> opc, string OpcodeStr,
                           RegisterClass RC, RegisterClass KRC,
                           X86MemOperand x86memop,
                           X86MemOperand x86scalar_mop, string BrdcstStr> {
  def rr : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
           (ins RC:$src),
           !strconcat(OpcodeStr, "\t{$src, ${dst}|${dst}, $src}"),
           []>, EVEX;
  def rm : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
           (ins x86memop:$src),
           !strconcat(OpcodeStr, "\t{$src, ${dst}|${dst}, $src}"),
           []>, EVEX;
  def rmb : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
            (ins x86scalar_mop:$src),
            !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                       ", ${dst}|${dst}, ${src}", BrdcstStr, "}"),
            []>, EVEX, EVEX_B;
  def rrkz : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
             (ins KRC:$mask, RC:$src),
             !strconcat(OpcodeStr,
                        "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
             []>, EVEX, EVEX_KZ;
  def rmkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
             (ins KRC:$mask, x86memop:$src),
             !strconcat(OpcodeStr,
                        "\t{$src, ${dst} {${mask}} {z}|${dst} {${mask}} {z}, $src}"),
             []>, EVEX, EVEX_KZ;
  def rmbkz : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
              (ins KRC:$mask, x86scalar_mop:$src),
              !strconcat(OpcodeStr, "\t{${src}", BrdcstStr,
                         ", ${dst} {${mask}} {z}|${dst} {${mask}} {z}, ${src}",
                         BrdcstStr, "}"),
              []>, EVEX, EVEX_KZ, EVEX_B;

  let Constraints = "$src1 = $dst" in {
    def rrk : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
              (ins RC:$src1, KRC:$mask, RC:$src2),
              !strconcat(OpcodeStr,
                         "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
              []>, EVEX, EVEX_K;
    def rmk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
              (ins RC:$src1, KRC:$mask, x86memop:$src2),
              !strconcat(OpcodeStr,
                         "\t{$src2, ${dst} {${mask}}|${dst} {${mask}}, $src2}"),
              []>, EVEX, EVEX_K;
    def rmbk : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
               (ins RC:$src1, KRC:$mask, x86scalar_mop:$src2),
               !strconcat(OpcodeStr, "\t{${src2}", BrdcstStr,
                          ", ${dst} {${mask}}|${dst} {${mask}}, ${src2}", BrdcstStr, "}"),
               []>, EVEX, EVEX_K, EVEX_B;
  }
}

let Predicates = [HasCDI] in {
defm VPCONFLICTD : avx512_conflict<0xC4, "vpconflictd", VR512, VK16WM,
                                   i512mem, i32mem, "{1to16}">,
                                   EVEX_V512, EVEX_CD8<32, CD8VF>;

defm VPCONFLICTQ : avx512_conflict<0xC4, "vpconflictq", VR512, VK8WM,
                                   i512mem, i64mem, "{1to8}">,
                                   EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
}
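
// Note (informal): vpconflictd/q write, for each element, a bit vector marking
// which lower-indexed elements hold the same value; software can use this to
// detect duplicate indices before a scatter.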

def : Pat<(int_x86_avx512_mask_conflict_d_512 VR512:$src2, VR512:$src1,
           GR16:$mask),
          (VPCONFLICTDrrk VR512:$src1,
           (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;

def : Pat<(int_x86_avx512_mask_conflict_q_512 VR512:$src2, VR512:$src1,
           GR8:$mask),
          (VPCONFLICTQrrk VR512:$src1,
           (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;

let Predicates = [HasCDI] in {
defm VPLZCNTD : avx512_conflict<0x44, "vplzcntd", VR512, VK16WM,
                                i512mem, i32mem, "{1to16}">,
                                EVEX_V512, EVEX_CD8<32, CD8VF>;

defm VPLZCNTQ : avx512_conflict<0x44, "vplzcntq", VR512, VK8WM,
                                i512mem, i64mem, "{1to8}">,
                                EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
}

def : Pat<(int_x86_avx512_mask_lzcnt_d_512 VR512:$src2, VR512:$src1,
           GR16:$mask),
          (VPLZCNTDrrk VR512:$src1,
           (v16i1 (COPY_TO_REGCLASS GR16:$mask, VK16WM)), VR512:$src2)>;

def : Pat<(int_x86_avx512_mask_lzcnt_q_512 VR512:$src2, VR512:$src1,
           GR8:$mask),
          (VPLZCNTQrrk VR512:$src1,
           (v8i1 (COPY_TO_REGCLASS GR8:$mask, VK8WM)), VR512:$src2)>;

def : Pat<(v16i32 (ctlz (memopv16i32 addr:$src))),
          (VPLZCNTDrm addr:$src)>;
def : Pat<(v16i32 (ctlz (v16i32 VR512:$src))),
          (VPLZCNTDrr VR512:$src)>;
def : Pat<(v8i64 (ctlz (memopv8i64 addr:$src))),
          (VPLZCNTQrm addr:$src)>;
def : Pat<(v8i64 (ctlz (v8i64 VR512:$src))),
          (VPLZCNTQrr VR512:$src)>;

def : Pat<(store (i1 -1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
def : Pat<(store (i1 1), addr:$dst), (MOV8mi addr:$dst, (i8 1))>;
def : Pat<(store (i1 0), addr:$dst), (MOV8mi addr:$dst, (i8 0))>;
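
// Note (informal): i1 constants are stored as a whole byte; both -1 and 1
// denote a set i1, so both lower to a byte store of 1.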

def : Pat<(store VK1:$src, addr:$dst),
          (KMOVWmk addr:$dst, (COPY_TO_REGCLASS VK1:$src, VK16))>;

def truncstorei1 : PatFrag<(ops node:$val, node:$ptr),
                           (truncstore node:$val, node:$ptr), [{
  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
}]>;
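
// Note (informal): this fragment matches only truncating stores whose memory
// type is i1, so a GR8 value being truncated to i1 in memory can be stored as
// the byte it already is (see the pattern below).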

def : Pat<(truncstorei1 GR8:$src, addr:$dst),
          (MOV8mr addr:$dst, GR8:$src)>;

multiclass cvt_by_vec_width<bits<8> opc, X86VectorVTInfo Vec, string OpcodeStr> {
  def rr : AVX512XS8I<opc, MRMDestReg, (outs Vec.RC:$dst), (ins Vec.KRC:$src),
                      !strconcat(OpcodeStr##Vec.Suffix, "\t{$src, $dst|$dst, $src}"),
                      [(set Vec.RC:$dst, (Vec.VT (X86vsext Vec.KRC:$src)))]>, EVEX;
}

multiclass cvt_mask_by_elt_width<bits<8> opc, AVX512VLVectorVTInfo VTInfo,
                                 string OpcodeStr, Predicate prd> {
  let Predicates = [prd] in
    defm Z : cvt_by_vec_width<opc, VTInfo.info512, OpcodeStr>, EVEX_V512;

  let Predicates = [prd, HasVLX] in {
    defm Z256 : cvt_by_vec_width<opc, VTInfo.info256, OpcodeStr>, EVEX_V256;
    defm Z128 : cvt_by_vec_width<opc, VTInfo.info128, OpcodeStr>, EVEX_V128;
  }
}

multiclass avx512_convert_mask_to_vector<string OpcodeStr> {
  defm NAME##B : cvt_mask_by_elt_width<0x28, avx512vl_i8_info, OpcodeStr,
                                       HasBWI>;
  defm NAME##W : cvt_mask_by_elt_width<0x28, avx512vl_i16_info, OpcodeStr,
                                       HasBWI>, VEX_W;
  defm NAME##D : cvt_mask_by_elt_width<0x38, avx512vl_i32_info, OpcodeStr,
                                       HasDQI>;
  defm NAME##Q : cvt_mask_by_elt_width<0x38, avx512vl_i64_info, OpcodeStr,
                                       HasDQI>, VEX_W;
}

defm VPMOVM2 : avx512_convert_mask_to_vector<"vpmovm2">;
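
// Note (informal): expanding the multiclasses above should produce names such
// as VPMOVM2BZ128rr, VPMOVM2WZ256rr, or VPMOVM2DZrr (element suffix, then
// vector width, then "rr"); each instruction sign-extends every mask bit into
// an all-ones or all-zeros element of the destination vector.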