diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 88c7c0725e05..38031b5057c2 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -876,12 +876,14 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setOperationPromotedToType(ISD::OR,     VT, MVT::v2i64);
       setOperationPromotedToType(ISD::XOR,    VT, MVT::v2i64);
       setOperationPromotedToType(ISD::LOAD,   VT, MVT::v2i64);
-      setOperationPromotedToType(ISD::SELECT, VT, MVT::v2i64);
     }
 
     // Custom lower v2i64 and v2f64 selects.
     setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
     setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
+    setOperationAction(ISD::SELECT,             MVT::v4i32, Custom);
+    setOperationAction(ISD::SELECT,             MVT::v8i16, Custom);
+    setOperationAction(ISD::SELECT,             MVT::v16i8, Custom);
 
     setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Legal);
     setOperationAction(ISD::FP_TO_SINT,         MVT::v2i32, Custom);
@@ -1058,6 +1060,9 @@
 
     setOperationAction(ISD::SELECT,            MVT::v4f64, Custom);
     setOperationAction(ISD::SELECT,            MVT::v4i64, Custom);
+    setOperationAction(ISD::SELECT,            MVT::v8i32, Custom);
+    setOperationAction(ISD::SELECT,            MVT::v16i16, Custom);
+    setOperationAction(ISD::SELECT,            MVT::v32i8, Custom);
     setOperationAction(ISD::SELECT,            MVT::v8f32, Custom);
 
     for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
@@ -1174,7 +1179,6 @@
       setOperationPromotedToType(ISD::OR,     VT, MVT::v4i64);
       setOperationPromotedToType(ISD::XOR,    VT, MVT::v4i64);
       setOperationPromotedToType(ISD::LOAD,   VT, MVT::v4i64);
-      setOperationPromotedToType(ISD::SELECT, VT, MVT::v4i64);
     }
 
     if (HasInt256) {
@@ -1347,6 +1351,9 @@
 
     setOperationAction(ISD::SELECT,             MVT::v8f64, Custom);
     setOperationAction(ISD::SELECT,             MVT::v8i64, Custom);
+    setOperationAction(ISD::SELECT,             MVT::v16i32, Custom);
+    setOperationAction(ISD::SELECT,             MVT::v32i16, Custom);
+    setOperationAction(ISD::SELECT,             MVT::v64i8, Custom);
     setOperationAction(ISD::SELECT,             MVT::v16f32, Custom);
 
     for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
@@ -1421,7 +1428,6 @@
     }
     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32 }) {
       setOperationPromotedToType(ISD::LOAD,   VT, MVT::v8i64);
-      setOperationPromotedToType(ISD::SELECT, VT, MVT::v8i64);
     }
 
     // Need to custom split v32i16/v64i8 bitcasts.
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 379ed2822bf1..de45b4697acc 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -611,26 +611,58 @@ def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
           (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
 
 let Predicates = [NoVLX] in {
+  def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
+  def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
+  def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
   def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
             (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
   def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
             (CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
+
+  def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
+  def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
+  def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
   def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
             (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
   def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
             (CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
 }
 let Predicates = [HasVLX] in {
+  def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
+  def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
+  def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
   def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
             (CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
   def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
             (CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
+
+  def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
+  def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
+  def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
+            (CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
   def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
             (CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
   def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
             (CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
 }
 
+def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
+          (CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
+def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
+          (CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
+def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
+          (CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
 def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
           (CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
 def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
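Not part of the patch: a minimal C++ sketch of the source-level pattern this change affects, using GNU/Clang vector extensions (the typedef and function name are illustrative, not from the patch). A scalar condition selecting between whole integer vectors becomes an ISD::SELECT node with a vector result type; with this patch such a select is custom-lowered to an X86cmov that the new CMOV_VR128/VR256/VR512 pseudo patterns match directly, instead of first being promoted to the same-width i64 vector type with bitcasts around it.

// Illustrative only -- not from the patch. Compile with e.g.
// clang++ -O2 -msse2 and inspect the output to see the select of
// whole vectors handled via the CMOV_VR128 pseudo (which the backend
// later expands into a branch over register copies, since x86 has no
// vector cmov instruction).
typedef int v4i32 __attribute__((vector_size(16)));

v4i32 select_v4i32(bool c, v4i32 t, v4i32 f) {
  // Scalar condition, vector operands: in LLVM IR this is
  //   select i1 %c, <4 x i32> %t, <4 x i32> %f
  // i.e. an ISD::SELECT with result type v4i32 during ISel.
  return c ? t : f;
}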