[X86] Add a target-specific DAG combine to combine extract_subvector from all zero/one build_vectors.

llvm-svn: 311841
Craig Topper 2017-08-27 05:39:57 +00:00
parent 71dab64a57
commit 36bd247f64
4 changed files with 26 additions and 19 deletions
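A rough sketch of the pattern this combine targets (hypothetical IR, not part of the commit): when legalization splits or widens vectors, it can produce an EXTRACT_SUBVECTOR whose source is an all-zeros or all-ones BUILD_VECTOR, and the new combine replaces that extract with a constant vector of the narrower type. The function name below is illustrative only.

; Hypothetical reduced example: the low half of a 256-bit zero vector is now
; recognized as a plain 128-bit zero, so no 256-bit zero has to be
; materialized (compare the vzeroupper removal in the first test update below).
define <2 x i64> @extract_low_of_zeros() {
  %half = shufflevector <4 x i64> zeroinitializer, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x i64> %half
}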


@@ -1628,6 +1628,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
   setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
   setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
+  setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
   setTargetDAGCombine(ISD::BITCAST);
   setTargetDAGCombine(ISD::VSELECT);
   setTargetDAGCombine(ISD::SELECT);
@@ -35678,6 +35679,25 @@ static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
+static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
+                                       TargetLowering::DAGCombinerInfo &DCI,
+                                       const X86Subtarget &Subtarget) {
+  if (DCI.isBeforeLegalizeOps())
+    return SDValue();
+
+  MVT OpVT = N->getSimpleValueType(0);
+  if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
+    return getZeroVector(OpVT, Subtarget, DAG, SDLoc(N));
+
+  if (ISD::isBuildVectorAllOnes(N->getOperand(0).getNode())) {
+    if (OpVT.getScalarType() == MVT::i1)
+      return DAG.getConstant(1, SDLoc(N), OpVT);
+    return getOnesVector(OpVT, DAG, SDLoc(N));
+  }
+
+  return SDValue();
+}
+
 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
@@ -35691,6 +35711,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
     return combineExtractVectorElt_SSE(N, DAG, DCI, Subtarget);
   case ISD::INSERT_SUBVECTOR:
     return combineInsertSubvector(N, DAG, DCI, Subtarget);
+  case ISD::EXTRACT_SUBVECTOR:
+    return combineExtractSubvector(N, DAG, DCI, Subtarget);
   case ISD::VSELECT:
   case ISD::SELECT:
   case X86ISD::SHRUNKBLEND: return combineSelect(N, DAG, DCI, Subtarget);
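The MVT::i1 special case is what the AVX-512 mask-register tests below exercise: when a 16-wide gather is split into two 8-wide halves, each half's mask is an EXTRACT_SUBVECTOR of the <16 x i1> mask operand, and if that mask is a constant all-ones vector the extract now folds directly to an all-ones mask constant, so the kshiftrw that used to carve out the upper mask half disappears. A hypothetical reduced example follows; the intrinsic mangling is written from memory and may differ slightly at this revision.

declare <16 x float> @llvm.masked.gather.v16f32(<16 x float*>, i32, <16 x i1>, <16 x float>)

; An unconditional (all-true mask) 16-element gather. On KNL it is split into
; two 8-wide vgatherqps halves; both half-masks are now known to be all ones.
define <16 x float> @gather_all_true(<16 x float*> %ptrs) {
  %g = call <16 x float> @llvm.masked.gather.v16f32(<16 x float*> %ptrs, i32 4, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x float> undef)
  ret <16 x float> %g
}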


@@ -7,7 +7,6 @@ define void @bad_cast() {
 ; CHECK-NEXT: vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT: vmovaps %xmm0, (%eax)
 ; CHECK-NEXT: movl $0, (%eax)
-; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retl
 %vext.i = shufflevector <2 x i64> undef, <2 x i64> undef, <3 x i32> <i32 0, i32 1, i32 undef>
 %vecinit8.i = shufflevector <3 x i64> zeroinitializer, <3 x i64> %vext.i, <3 x i32> <i32 0, i32 3, i32 4>


@@ -722,10 +722,8 @@ define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
 ; KNL_64-NEXT: vpsllq $2, %zmm1, %zmm1
 ; KNL_64-NEXT: vpaddq %zmm1, %zmm0, %zmm0
 ; KNL_64-NEXT: kxnorw %k0, %k0, %k1
-; KNL_64-NEXT: kshiftrw $8, %k1, %k2
-; KNL_64-NEXT: vgatherqps (,%zmm0), %ymm1 {%k2}
-; KNL_64-NEXT: vgatherqps (,%zmm0), %ymm2 {%k1}
-; KNL_64-NEXT: vinsertf64x4 $1, %ymm1, %zmm2, %zmm0
+; KNL_64-NEXT: vgatherqps (,%zmm0), %ymm1 {%k1}
+; KNL_64-NEXT: vinsertf64x4 $1, %ymm1, %zmm1, %zmm0
 ; KNL_64-NEXT: retq
 ;
 ; KNL_32-LABEL: test14:
@@ -747,10 +745,8 @@ define <16 x float> @test14(float* %base, i32 %ind, <16 x float*> %vec) {
 ; SKX-NEXT: vpsllq $2, %zmm1, %zmm1
 ; SKX-NEXT: vpaddq %zmm1, %zmm0, %zmm0
 ; SKX-NEXT: kxnorw %k0, %k0, %k1
-; SKX-NEXT: kshiftrw $8, %k1, %k2
-; SKX-NEXT: vgatherqps (,%zmm0), %ymm1 {%k2}
-; SKX-NEXT: vgatherqps (,%zmm0), %ymm2 {%k1}
-; SKX-NEXT: vinsertf64x4 $1, %ymm1, %zmm2, %zmm0
+; SKX-NEXT: vgatherqps (,%zmm0), %ymm1 {%k1}
+; SKX-NEXT: vinsertf64x4 $1, %ymm1, %zmm1, %zmm0
 ; SKX-NEXT: retq
 ;
 ; SKX_32-LABEL: test14:
@@ -1624,7 +1620,6 @@ define <16 x float*> @test31(<16 x float**> %ptrs) {
 ; KNL_64-NEXT: kxnorw %k0, %k0, %k1
 ; KNL_64-NEXT: kxnorw %k0, %k0, %k2
 ; KNL_64-NEXT: vpgatherqq (,%zmm0), %zmm2 {%k2}
-; KNL_64-NEXT: kshiftrw $8, %k1, %k1
 ; KNL_64-NEXT: vpgatherqq (,%zmm1), %zmm3 {%k1}
 ; KNL_64-NEXT: vmovdqa64 %zmm2, %zmm0
 ; KNL_64-NEXT: vmovdqa64 %zmm3, %zmm1
@@ -1642,7 +1637,6 @@ define <16 x float*> @test31(<16 x float**> %ptrs) {
 ; SKX-NEXT: kxnorw %k0, %k0, %k1
 ; SKX-NEXT: kxnorw %k0, %k0, %k2
 ; SKX-NEXT: vpgatherqq (,%zmm0), %zmm2 {%k2}
-; SKX-NEXT: kshiftrw $8, %k1, %k1
 ; SKX-NEXT: vpgatherqq (,%zmm1), %zmm3 {%k1}
 ; SKX-NEXT: vmovdqa64 %zmm2, %zmm0
 ; SKX-NEXT: vmovdqa64 %zmm3, %zmm1


@@ -6,14 +6,6 @@ define void @f_f(<16 x double>* %ptr) {
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT: vmovdqa %xmm0, (%rax)
-; CHECK-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0
-; CHECK-NEXT: vmovapd (%rdi), %zmm1
-; CHECK-NEXT: vmovapd 64(%rdi), %zmm2
-; CHECK-NEXT: vptestmq %zmm0, %zmm0, %k1
-; CHECK-NEXT: vmovapd %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovapd %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vmovapd %zmm2, 64(%rdi)
-; CHECK-NEXT: vmovapd %zmm1, (%rdi)
 store <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>, <16 x i8>* undef
 %load_mask8.i.i.i = load <16 x i8>, <16 x i8>* undef
 %v.i.i.i.i = load <16 x double>, <16 x double>* %ptr