[X86] Add a DAG combine to simplify masks for AVX2 gather instructions.

AVX2 gathers use only the upper (sign) bit of each mask element, which lets us simplify a sign_extend_inreg of the mask down to just the shift left.

llvm-svn: 319514
Craig Topper 2017-12-01 02:49:07 +00:00
parent d30b23d6a5
commit 11f733df9b
2 changed files with 27 additions and 40 deletions
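
For context on why the combine is valid: the AVX2 gather instructions (vpgatherdd, vpgatherqq, and friends) consult only the most significant bit of each mask element, so a mask built as shift-left followed by arithmetic shift-right (the usual expansion of sign_extend_inreg) tells the gather nothing more than the shift-left alone. A minimal standalone C++ sketch of that observation (not part of the patch; the 64-bit element width and the helper name gatherLaneEnabled are illustrative):

#include <cassert>
#include <cstdint>

// A mask element as the gather hardware sees it: only bit 63 is consulted.
static bool gatherLaneEnabled(uint64_t MaskElt) {
  return (MaskElt >> 63) & 1;
}

int main() {
  // A boolean lane arrives as 0 or 1 in the low bit of a 64-bit element.
  for (uint64_t Bool : {uint64_t(0), uint64_t(1)}) {
    // Old code: shl by 63, then arithmetic shr by 63 (sign_extend_inreg),
    // producing all-zeros or all-ones.
    uint64_t Shifted = Bool << 63;
    uint64_t SignExtended =
        static_cast<uint64_t>(static_cast<int64_t>(Shifted) >> 63);

    // The gather only demands the sign bit, so the shift alone is enough.
    assert(gatherLaneEnabled(Shifted) == gatherLaneEnabled(SignExtended));
  }
  return 0;
}

The DemandedMask of APInt::getSignMask(...) in the combine below encodes exactly this: only the sign bit of each mask element is demanded, so SimplifyDemandedBits can strip the redundant sign extension (the vpsrad and vpxor/vpcmpgtq sequences removed in the tests).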

@@ -35991,6 +35991,21 @@ static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
    NewOps[2] = Mask.getOperand(0);
    DAG.UpdateNodeOperands(N, NewOps);
  }

  // With AVX2 we only demand the upper bit of the mask.
  if (!Subtarget.hasAVX512()) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    KnownBits Known;
    APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedMask, Known, TLO)) {
      DCI.AddToWorklist(Mask.getNode());
      DCI.CommitTargetLoweringOpt(TLO);
      return SDValue(N, 0);
    }
  }

  return SDValue();
}
@@ -37097,6 +37112,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
  case X86ISD::FMSUBADD_RND:
  case X86ISD::FMADDSUB:
  case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, Subtarget);
  case X86ISD::MGATHER:
  case X86ISD::MSCATTER:
  case ISD::MGATHER:
  case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI, Subtarget);
  case X86ISD::TESTM: return combineTestM(N, DAG, Subtarget);

@@ -10,12 +10,10 @@ define <2 x i32> @masked_gather_v2i32(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i3
; X86-LABEL: masked_gather_v2i32:
; X86: # BB#0: # %entry
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
; X86-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X86-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: vpgatherqd %xmm0, (,%xmm2), %xmm1
; X86-NEXT: vpmovsxdq %xmm1, %xmm0
; X86-NEXT: retl
@@ -23,11 +21,9 @@ define <2 x i32> @masked_gather_v2i32(<2 x i32*>* %ptr, <2 x i1> %masks, <2 x i3
; X64-LABEL: masked_gather_v2i32:
; X64: # BB#0: # %entry
; X64-NEXT: vpsllq $63, %xmm0, %xmm0
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; X64-NEXT: vmovdqa (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpgatherqd %xmm0, (,%xmm2), %xmm1
; X64-NEXT: vpmovsxdq %xmm1, %xmm0
; X64-NEXT: retq
@@ -64,12 +60,10 @@ define <4 x i32> @masked_gather_v2i32_concat(<2 x i32*>* %ptr, <2 x i1> %masks,
; X86-LABEL: masked_gather_v2i32_concat:
; X86: # BB#0: # %entry
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
; X86-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X86-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: vpgatherqd %xmm0, (,%xmm2), %xmm1
; X86-NEXT: vpmovsxdq %xmm1, %xmm0
; X86-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -78,11 +72,9 @@ define <4 x i32> @masked_gather_v2i32_concat(<2 x i32*>* %ptr, <2 x i1> %masks,
; X64-LABEL: masked_gather_v2i32_concat:
; X64: # BB#0: # %entry
; X64-NEXT: vpsllq $63, %xmm0, %xmm0
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; X64-NEXT: vmovdqa (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpgatherqd %xmm0, (,%xmm2), %xmm1
; X64-NEXT: vpmovsxdq %xmm1, %xmm0
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
@@ -125,7 +117,6 @@ define <2 x float> @masked_gather_v2float(<2 x float*>* %ptr, <2 x i1> %masks, <
; X86: # BB#0: # %entry
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: vpsrad $31, %xmm0, %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vgatherdps %xmm0, (,%xmm2), %xmm1
@@ -137,7 +128,6 @@ define <2 x float> @masked_gather_v2float(<2 x float*>* %ptr, <2 x i1> %masks, <
; X64-NEXT: vmovaps (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpslld $31, %xmm0, %xmm0
; X64-NEXT: vpsrad $31, %xmm0, %xmm0
; X64-NEXT: vgatherqps %xmm0, (,%xmm2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -175,7 +165,6 @@ define <4 x float> @masked_gather_v2float_concat(<2 x float*>* %ptr, <2 x i1> %m
; X86: # BB#0: # %entry
; X86-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,2],zero,zero
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: vpsrad $31, %xmm0, %xmm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; X86-NEXT: vgatherdps %xmm0, (,%xmm2), %xmm1
@@ -187,7 +176,6 @@ define <4 x float> @masked_gather_v2float_concat(<2 x float*>* %ptr, <2 x i1> %m
; X64-NEXT: vmovaps (%rdi), %xmm2
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X64-NEXT: vpslld $31, %xmm0, %xmm0
; X64-NEXT: vpsrad $31, %xmm0, %xmm0
; X64-NEXT: vgatherqps %xmm0, (,%xmm2), %xmm1
; X64-NEXT: vmovaps %xmm1, %xmm0
; X64-NEXT: retq
@@ -228,7 +216,6 @@ define <4 x i32> @masked_gather_v4i32(<4 x i32*> %ptrs, <4 x i1> %masks, <4 x i3
; X86-LABEL: masked_gather_v4i32:
; X86: # BB#0: # %entry
; X86-NEXT: vpslld $31, %xmm1, %xmm1
; X86-NEXT: vpsrad $31, %xmm1, %xmm1
; X86-NEXT: vpgatherdd %xmm1, (,%xmm0), %xmm2
; X86-NEXT: vmovdqa %xmm2, %xmm0
; X86-NEXT: retl
@@ -236,7 +223,6 @@ define <4 x i32> @masked_gather_v4i32(<4 x i32*> %ptrs, <4 x i1> %masks, <4 x i3
; X64-LABEL: masked_gather_v4i32:
; X64: # BB#0: # %entry
; X64-NEXT: vpslld $31, %xmm1, %xmm1
; X64-NEXT: vpsrad $31, %xmm1, %xmm1
; X64-NEXT: vpgatherqd %xmm1, (,%ymm0), %xmm2
; X64-NEXT: vmovdqa %xmm2, %xmm0
; X64-NEXT: vzeroupper
@@ -290,7 +276,6 @@ define <4 x float> @masked_gather_v4float(<4 x float*> %ptrs, <4 x i1> %masks, <
; X86-LABEL: masked_gather_v4float:
; X86: # BB#0: # %entry
; X86-NEXT: vpslld $31, %xmm1, %xmm1
; X86-NEXT: vpsrad $31, %xmm1, %xmm1
; X86-NEXT: vgatherdps %xmm1, (,%xmm0), %xmm2
; X86-NEXT: vmovaps %xmm2, %xmm0
; X86-NEXT: retl
@@ -298,7 +283,6 @@ define <4 x float> @masked_gather_v4float(<4 x float*> %ptrs, <4 x i1> %masks, <
; X64-LABEL: masked_gather_v4float:
; X64: # BB#0: # %entry
; X64-NEXT: vpslld $31, %xmm1, %xmm1
; X64-NEXT: vpsrad $31, %xmm1, %xmm1
; X64-NEXT: vgatherqps %xmm1, (,%ymm0), %xmm2
; X64-NEXT: vmovaps %xmm2, %xmm0
; X64-NEXT: vzeroupper
@@ -353,7 +337,6 @@ define <8 x i32> @masked_gather_v8i32(<8 x i32*>* %ptr, <8 x i1> %masks, <8 x i3
; X86: # BB#0: # %entry
; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-NEXT: vpslld $31, %ymm0, %ymm0
; X86-NEXT: vpsrad $31, %ymm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovdqa (%eax), %ymm2
; X86-NEXT: vpgatherdd %ymm0, (,%ymm2), %ymm1
@@ -470,7 +453,6 @@ define <8 x float> @masked_gather_v8float(<8 x float*>* %ptr, <8 x i1> %masks, <
; X86: # BB#0: # %entry
; X86-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X86-NEXT: vpslld $31, %ymm0, %ymm0
; X86-NEXT: vpsrad $31, %ymm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovaps (%eax), %ymm2
; X86-NEXT: vgatherdps %ymm0, (,%ymm2), %ymm1
@@ -587,7 +569,6 @@ define <4 x i64> @masked_gather_v4i64(<4 x i64*>* %ptr, <4 x i1> %masks, <4 x i6
; X86-LABEL: masked_gather_v4i64:
; X86: # BB#0: # %entry
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: vpsrad $31, %xmm0, %xmm0
; X86-NEXT: vpmovsxdq %xmm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovdqa (%eax), %xmm2
@@ -598,7 +579,6 @@ define <4 x i64> @masked_gather_v4i64(<4 x i64*>* %ptr, <4 x i1> %masks, <4 x i6
; X64-LABEL: masked_gather_v4i64:
; X64: # BB#0: # %entry
; X64-NEXT: vpslld $31, %xmm0, %xmm0
; X64-NEXT: vpsrad $31, %xmm0, %xmm0
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: vmovdqa (%rdi), %ymm2
; X64-NEXT: vpgatherqq %ymm0, (,%ymm2), %ymm1
@@ -664,7 +644,6 @@ define <4 x double> @masked_gather_v4double(<4 x double*>* %ptr, <4 x i1> %masks
; X86-LABEL: masked_gather_v4double:
; X86: # BB#0: # %entry
; X86-NEXT: vpslld $31, %xmm0, %xmm0
; X86-NEXT: vpsrad $31, %xmm0, %xmm0
; X86-NEXT: vpmovsxdq %xmm0, %ymm0
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovapd (%eax), %xmm2
@@ -675,7 +654,6 @@ define <4 x double> @masked_gather_v4double(<4 x double*>* %ptr, <4 x i1> %masks
; X64-LABEL: masked_gather_v4double:
; X64: # BB#0: # %entry
; X64-NEXT: vpslld $31, %xmm0, %xmm0
; X64-NEXT: vpsrad $31, %xmm0, %xmm0
; X64-NEXT: vpmovsxdq %xmm0, %ymm0
; X64-NEXT: vmovapd (%rdi), %ymm2
; X64-NEXT: vgatherqpd %ymm0, (,%ymm2), %ymm1
@@ -740,20 +718,16 @@ declare <2 x i64> @llvm.masked.gather.v2i64(<2 x i64*> %ptrs, i32 %align, <2 x i
define <2 x i64> @masked_gather_v2i64(<2 x i64*>* %ptr, <2 x i1> %masks, <2 x i64> %passthro) {
; X86-LABEL: masked_gather_v2i64:
; X86: # BB#0: # %entry
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
; X86-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
; X86-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; X86-NEXT: vpgatherqq %xmm0, (,%xmm3), %xmm1
; X86-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
; X86-NEXT: vpgatherqq %xmm0, (,%xmm2), %xmm1
; X86-NEXT: vmovdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2i64:
; X64: # BB#0: # %entry
; X64-NEXT: vpsllq $63, %xmm0, %xmm0
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; X64-NEXT: vmovdqa (%rdi), %xmm2
; X64-NEXT: vpgatherqq %xmm0, (,%xmm2), %xmm1
; X64-NEXT: vmovdqa %xmm1, %xmm0
@@ -791,20 +765,16 @@ declare <2 x double> @llvm.masked.gather.v2double(<2 x double*> %ptrs, i32 %alig
define <2 x double> @masked_gather_v2double(<2 x double*>* %ptr, <2 x i1> %masks, <2 x double> %passthro) {
; X86-LABEL: masked_gather_v2double:
; X86: # BB#0: # %entry
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
; X86-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vpmovzxdq {{.*#+}} xmm3 = mem[0],zero,mem[1],zero
; X86-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; X86-NEXT: vgatherqpd %xmm0, (,%xmm3), %xmm1
; X86-NEXT: vpmovzxdq {{.*#+}} xmm2 = mem[0],zero,mem[1],zero
; X86-NEXT: vpsllq $63, %xmm0, %xmm0
; X86-NEXT: vgatherqpd %xmm0, (,%xmm2), %xmm1
; X86-NEXT: vmovapd %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: masked_gather_v2double:
; X64: # BB#0: # %entry
; X64-NEXT: vpsllq $63, %xmm0, %xmm0
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm0
; X64-NEXT: vmovapd (%rdi), %xmm2
; X64-NEXT: vgatherqpd %xmm0, (,%xmm2), %xmm1
; X64-NEXT: vmovapd %xmm1, %xmm0