[X86][SSE] Improve combineLogicBlendIntoPBLENDV to use general masks.

Currently combineLogicBlendIntoPBLENDV can only match an ASHR to detect sign-splatting of a bit mask; this patch generalises the check to use computeNumSignBits instead.
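
In sketch form, the mask test goes from matching one specific sign-splat shift node to a generic known-sign-bits query (condensed from the first diff hunk below):

  // Before: only an explicit sign-splatting shift is accepted as the mask.
  unsigned SraAmt = ~0;
  if (Mask.getOpcode() == ISD::SRA) {
    if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
      if (auto *AmtConst = AmtBV->getConstantSplatNode())
        SraAmt = AmtConst->getZExtValue();
  } else if (Mask.getOpcode() == X86ISD::VSRAI)
    SraAmt = Mask.getConstantOperandVal(1);
  if ((SraAmt + 1) != EltBits)
    return SDValue();

  // After: accept any integer mask whose lanes are proven all-sign-bits.
  if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
    return SDValue();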

This is a first step toward several things we can do to improve PBLENDV support:

 * Better matching of X86ISD::ANDNP patterns.
 * Handle floating point cases.
 * Better vector and bitcast support in computeNumSignBits.
 * Recognise that PBLENDV only uses the sign bit of the mask; we should be able to strip away sign splats (ASHR, PCMPGT isNeg tests, etc.) (a rough sketch follows this list).
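
A rough sketch of that last item, assuming a combine over the existing X86ISD::VSRAI node (hypothetical follow-up, not part of this commit): an arithmetic shift by EltBits - 1 leaves the sign bit of each lane unchanged, so the splat can simply be peeled off the blend mask.

  // Hypothetical: blendv(ashr(M, EltBits - 1), X, Y) --> blendv(M, X, Y),
  // since only the sign bit of the mask is consumed and VSRAI preserves it.
  if (Mask.getOpcode() == X86ISD::VSRAI &&
      Mask.getConstantOperandVal(1) == (EltBits - 1))
    Mask = Mask.getOperand(0);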

Differential Revision: https://reviews.llvm.org/D32953

llvm-svn: 302424
commit df39b03f29 (parent 0c8dcfd743)
Author: Simon Pilgrim
Date:   2017-05-08 14:16:39 +00:00
4 changed files with 34 additions and 58 deletions

@@ -31561,20 +31561,22 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
 // (sub (xor X, M), M)
 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
                                             const X86Subtarget &Subtarget) {
-  assert(N->getOpcode() == ISD::OR);
+  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
   EVT VT = N->getValueType(0);
 
-  if (!((VT == MVT::v2i64) || (VT == MVT::v4i64 && Subtarget.hasInt256())))
+  if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
+        (VT.is256BitVector() && Subtarget.hasInt256())))
     return SDValue();
-  assert(Subtarget.hasSSE2() && "Unexpected i64 vector without SSE2!");
 
-  // Canonicalize pandn to RHS
-  if (N0.getOpcode() == X86ISD::ANDNP)
+  // Canonicalize AND to LHS.
+  if (N1.getOpcode() == ISD::AND)
     std::swap(N0, N1);
 
+  // TODO: Attempt to match against AND(XOR(-1,X),Y) as well, waiting for
+  // ANDNP combine allows other combines to happen that prevent matching.
   if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
     return SDValue();
@@ -31596,20 +31598,10 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
   Y = peekThroughBitcasts(Y);
   EVT MaskVT = Mask.getValueType();
 
-  // Validate that the Mask operand is a vector sra node.
-  // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
-  // there is no psrai.b
   unsigned EltBits = MaskVT.getScalarSizeInBits();
-  unsigned SraAmt = ~0;
-  if (Mask.getOpcode() == ISD::SRA) {
-    if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
-      if (auto *AmtConst = AmtBV->getConstantSplatNode())
-        SraAmt = AmtConst->getZExtValue();
-  } else if (Mask.getOpcode() == X86ISD::VSRAI)
-    SraAmt = Mask.getConstantOperandVal(1);
-  if ((SraAmt + 1) != EltBits)
+
+  // TODO: Attempt to handle floating point cases as well?
+  if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
     return SDValue();
 
   SDLoc DL(N);
@@ -31630,7 +31622,8 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
   // (add (xor X, M), (and M, 1))
   // And further to:
   // (sub (xor X, M), M)
-  if (X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
+  if (X.getValueType() == MaskVT && Y.getValueType() == MaskVT &&
+      DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT)) {
     auto IsNegV = [](SDNode *N, SDValue V) {
       return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
              ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
@@ -31642,9 +31635,6 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
       V = Y;
 
     if (V) {
-      if (EltBits != 8 && EltBits != 16 && EltBits != 32)
-        return SDValue();
-
       SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
       SDValue SubOp2 = Mask;
@@ -31661,8 +31651,8 @@ static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
       if (V == Y)
         std::swap(SubOp1, SubOp2);
 
-      return DAG.getBitcast(VT,
-                            DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2));
+      SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
+      return DAG.getBitcast(VT, Res);
     }
   }
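
As a scalar illustration of the (sub (xor X, M), M) form above: when M is a lane's sign splat it is either all zeros or all ones, so the expression is a conditional negation. A minimal sketch (the helper name is hypothetical):

  // M == 0:  (X ^ 0)  - 0    == X             (lane passes X through)
  // M == -1: (X ^ -1) - (-1) == ~X + 1 == -X  (lane negates X)
  int blendNegate(int X, int M) { // assumes M is 0 or -1 in every use
    return (X ^ M) - M;
  }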

@@ -200,32 +200,29 @@ define <8 x i16> @trunc(<8 x i16> %a, <8 x i16> %b, <8 x i32> %c, <8 x i32> %d)
 ; SSE41:       # BB#0:
 ; SSE41-NEXT:    pcmpeqw %xmm1, %xmm0
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE41-NEXT:    pshufb %xmm1, %xmm5
-; SSE41-NEXT:    pshufb %xmm1, %xmm4
-; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
 ; SSE41-NEXT:    pshufb %xmm1, %xmm3
 ; SSE41-NEXT:    pshufb %xmm1, %xmm2
 ; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; SSE41-NEXT:    pand %xmm0, %xmm2
-; SSE41-NEXT:    pandn %xmm4, %xmm0
-; SSE41-NEXT:    por %xmm2, %xmm0
+; SSE41-NEXT:    pshufb %xmm1, %xmm5
+; SSE41-NEXT:    pshufb %xmm1, %xmm4
+; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; SSE41-NEXT:    pblendvb %xmm0, %xmm2, %xmm4
+; SSE41-NEXT:    movdqa %xmm4, %xmm0
 ; SSE41-NEXT:    retq
 ;
 ; AVX1-LABEL: trunc:
 ; AVX1:       # BB#0:
 ; AVX1-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm1
+; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm1
 ; AVX1-NEXT:    vmovdqa {{.*#+}} xmm4 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
 ; AVX1-NEXT:    vpshufb %xmm4, %xmm1, %xmm1
-; AVX1-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
-; AVX1-NEXT:    vpandn %xmm1, %xmm0, %xmm1
-; AVX1-NEXT:    vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
 ; AVX1-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
-; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX1-NEXT:    vpand %xmm0, %xmm2, %xmm0
-; AVX1-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
+; AVX1-NEXT:    vextractf128 $1, %ymm3, %xmm2
+; AVX1-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT:    vpshufb %xmm4, %xmm3, %xmm3
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm3[0],xmm2[0]
+; AVX1-NEXT:    vpblendvb %xmm0, %xmm1, %xmm2, %xmm0
 ; AVX1-NEXT:    vzeroupper
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: trunc:
@@ -233,13 +230,11 @@ define <8 x i16> @trunc(<8 x i16> %a, <8 x i16> %b, <8 x i32> %c, <8 x i32> %d)
 ; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vmovdqa {{.*#+}} ymm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
-; AVX2-NEXT:    vpshufb %ymm1, %ymm3, %ymm3
-; AVX2-NEXT:    vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3]
-; AVX2-NEXT:    vpandn %xmm3, %xmm0, %xmm3
-; AVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm1
+; AVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
+; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
+; AVX2-NEXT:    vpshufb %ymm1, %ymm3, %ymm1
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
-; AVX2-NEXT:    vpand %xmm0, %xmm1, %xmm0
-; AVX2-NEXT:    vpor %xmm3, %xmm0, %xmm0
+; AVX2-NEXT:    vpblendvb %xmm0, %xmm2, %xmm1, %xmm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
   %cmp = icmp eq <8 x i16> %a, %b

@@ -35,11 +35,8 @@ define <2 x i64> @PR32907(<2 x i64> %astype.i, <2 x i64> %astype6.i) {
 ; AVX512:       # BB#0: # %entry
 ; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpsraq $63, %zmm0, %zmm1
-; AVX512-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX512-NEXT:    vpsubq %xmm0, %xmm2, %xmm2
-; AVX512-NEXT:    vpandn %xmm0, %xmm1, %xmm0
-; AVX512-NEXT:    vpand %xmm2, %xmm1, %xmm1
-; AVX512-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpxor %xmm1, %xmm0, %xmm0
+; AVX512-NEXT:    vpsubq %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
 entry:

@@ -35,9 +35,7 @@ define <8 x i16> @signbit_sel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask)
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vpxor %xmm3, %xmm3, %xmm3
 ; AVX-NEXT:    vpcmpgtw %xmm2, %xmm3, %xmm2
-; AVX-NEXT:    vpandn %xmm1, %xmm2, %xmm1
-; AVX-NEXT:    vpand %xmm2, %xmm0, %xmm0
-; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    vpblendvb %xmm2, %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
   %tr = icmp slt <8 x i16> %mask, zeroinitializer
   %z = select <8 x i1> %tr, <8 x i16> %x, <8 x i16> %y
@@ -162,18 +160,14 @@ define <16 x i16> @signbit_sel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %mask)
 ; AVX2:       # BB#0:
 ; AVX2-NEXT:    vpxor %ymm3, %ymm3, %ymm3
 ; AVX2-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm2
-; AVX2-NEXT:    vpandn %ymm1, %ymm2, %ymm1
-; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; AVX2-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: signbit_sel_v16i16:
 ; AVX512:       # BB#0:
 ; AVX512-NEXT:    vpxor %ymm3, %ymm3, %ymm3
 ; AVX512-NEXT:    vpcmpgtw %ymm2, %ymm3, %ymm2
-; AVX512-NEXT:    vpandn %ymm1, %ymm2, %ymm1
-; AVX512-NEXT:    vpand %ymm2, %ymm0, %ymm0
-; AVX512-NEXT:    vpor %ymm1, %ymm0, %ymm0
+; AVX512-NEXT:    vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512-NEXT:    retq
   %tr = icmp slt <16 x i16> %mask, zeroinitializer
   %z = select <16 x i1> %tr, <16 x i16> %x, <16 x i16> %y