[X86][SSE] combineSetCCMOVMSK - add initial support for allof patterns.

Handle MOVMSK 'allof' comparisons (X86ISD::SUB X, AllBitsMask) as well as 'anyof' patterns.

This allows the MOVMSK(BITCAST(X)) fold to handle these patterns as well, to fix PR37087.
Author: Simon Pilgrim
Date:   2020-06-07 15:59:12 +01:00
commit 3a28ae091b
parent dc52ce424b
3 changed files with 38 additions and 27 deletions
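For a concrete picture of the 'allof' idiom this targets, here is a hand-written C++ sketch (an assumed reproducer in the spirit of PR37087, not a test taken from the report). Before this change, an all-lanes check on a <4 x double> compare mask could end up going through an 8 x f32 view, i.e. vmovmskps + cmpl $255; afterwards the backend can keep f64 granularity and emit vmovmskpd + cmpl $15, as the first test diff below shows.

    #include <immintrin.h>

    // All-of: true iff every f64 lane of a0 compares equal to zero.
    // Each matching lane yields an all-ones 64-bit mask, so "all lanes
    // set" only needs the four f64 sign bits (mask 0xF).
    bool allof_v4f64_eq_zero(__m256d a0) {
      __m256d k = _mm256_cmp_pd(a0, _mm256_setzero_pd(), _CMP_EQ_OQ);
      return _mm256_movemask_pd(k) == 0xF;
    }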

llvm/lib/Target/X86/X86ISelLowering.cpp

@@ -40234,15 +40234,19 @@ static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
                                   SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
-  // Only handle eq/ne against zero (any_of).
-  // TODO: Handle eq/ne against -1 (all_of) as well.
+  // Handle eq/ne against zero (any_of).
+  // Handle eq/ne against -1 (all_of).
   if (!(CC == X86::COND_E || CC == X86::COND_NE))
     return SDValue();
   if (EFLAGS.getValueType() != MVT::i32)
     return SDValue();
   unsigned CmpOpcode = EFLAGS.getOpcode();
-  if (CmpOpcode != X86ISD::CMP || !isNullConstant(EFLAGS.getOperand(1)))
+  if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
     return SDValue();
+  auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
+  if (!CmpConstant)
+    return SDValue();
+  const APInt &CmpVal = CmpConstant->getAPIntValue();

   SDValue CmpOp = EFLAGS.getOperand(0);
   unsigned CmpBits = CmpOp.getValueSizeInBits();
@@ -40259,6 +40263,14 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
   MVT VecVT = Vec.getSimpleValueType();
   assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
          "Unexpected MOVMSK operand");
+  unsigned NumElts = VecVT.getVectorNumElements();
+  unsigned NumEltBits = VecVT.getScalarSizeInBits();
+
+  bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isNullValue();
+  bool IsAllOf = CmpOpcode == X86ISD::SUB && NumElts <= CmpVal.getBitWidth() &&
+                 CmpVal.isMask(NumElts);
+  if (!IsAnyOf && !IsAllOf)
+    return SDValue();

   // See if we can peek through to a vector with a wider element type, if the
   // signbits extend down to all the sub-elements as well.
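A note on the IsAllOf check added above: a MOVMSK of a vector with NumElts lanes produces exactly NumElts meaningful low bits, and an eq/ne against that all-lanes value reaches this combine as X86ISD::SUB against the constant (1 << NumElts) - 1, which APInt::isMask(NumElts) recognises. A minimal standalone sketch of that predicate (hypothetical values; isMaskSketch is not part of the patch):

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    // For a 4-lane MOVMSK the only "all lanes set" constant is 0b1111.
    void isMaskSketch() {
      llvm::APInt AllOf(32, 15);   // 0b1111: low 4 bits set
      llvm::APInt Partial(32, 7);  // 0b0111: only 3 low bits set
      assert(AllOf.isMask(4));     // true: exactly the low 4 bits
      assert(!Partial.isMask(4));  // false: bit 3 is clear
    }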
@@ -40266,15 +40278,17 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
   // potential SimplifyDemandedBits/Elts cases.
   if (Vec.getOpcode() == ISD::BITCAST) {
     SDValue BC = peekThroughBitcasts(Vec);
-    unsigned NumEltBits = VecVT.getScalarSizeInBits();
-    unsigned BCNumEltBits = BC.getScalarValueSizeInBits();
+    MVT BCVT = BC.getSimpleValueType();
+    unsigned BCNumElts = BCVT.getVectorNumElements();
+    unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
     if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
         BCNumEltBits > NumEltBits &&
         DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
       SDLoc DL(EFLAGS);
+      unsigned CmpMask = IsAnyOf ? 0 : ((1 << BCNumElts) - 1);
       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
-                         DAG.getConstant(0, DL, MVT::i32));
+                         DAG.getConstant(CmpMask, DL, MVT::i32));
     }
   }
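To make the new CmpMask concrete: once the peek-through fires, MOVMSK runs on the wider-element vector, so an all-of compare needs one bit per wider element, while any-of still compares against zero. A small sketch of that arithmetic (movmskCmpMask is a hypothetical helper, not code from the patch):

    #include <cstdint>

    // Any-of: compare the new MOVMSK against 0. All-of: compare against
    // a mask with one bit per element of the wider type.
    uint32_t movmskCmpMask(bool IsAnyOf, unsigned BCNumElts) {
      return IsAnyOf ? 0u : ((1u << BCNumElts) - 1u);
    }
    // e.g. peeking a v8f32 mask back to v4f64: movmskCmpMask(false, 4)
    // == 0xF for the all-of case, while any-of keeps comparing against 0.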
@@ -40282,7 +40296,8 @@ static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
   // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
   // sign bits prior to the comparison with zero unless we know that
   // the vXi16 splats the sign bit down to the lower i8 half.
-  if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
+  // TODO: Handle all_of patterns.
+  if (IsAnyOf && Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
     SDValue VecOp0 = Vec.getOperand(0);
     SDValue VecOp1 = Vec.getOperand(1);
     bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
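The new IsAnyOf guard is what keeps this PACKSS fold sound: the saturating pack preserves each i16 lane's sign bit, so a nonzero PMOVMSKB proves some source lane had its sign bit set, but an all-of check would additionally need every packed byte set, hence the TODO. A hedged intrinsics sketch of the any-of case this still covers (hand-written, not from the test suite):

    #include <immintrin.h>

    // Any-of over a v8i16 compare mask: the saturating pack keeps each
    // lane's sign bit, so a nonzero byte mask <=> some i16 lane matched.
    bool anyof_v8i16_eq(__m128i a, __m128i b) {
      __m128i k = _mm_cmpeq_epi16(a, b); // 0xFFFF per equal lane, else 0
      __m128i p = _mm_packs_epi16(k, k); // 0xFFFF -> 0xFF, 0 -> 0x00
      return _mm_movemask_epi8(p) != 0;
    }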

llvm/test/CodeGen/X86/combine-movmsk-avx.ll

@@ -30,8 +30,8 @@ define i1 @movmskps_allof_bitcast_v4f64(<4 x double> %a0) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; CHECK-NEXT:    vcmpeqpd %ymm1, %ymm0, %ymm0
-; CHECK-NEXT:    vmovmskps %ymm0, %eax
-; CHECK-NEXT:    cmpl $255, %eax
+; CHECK-NEXT:    vmovmskpd %ymm0, %eax
+; CHECK-NEXT:    cmpl $15, %eax
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq

llvm/test/CodeGen/X86/combine-movmsk.ll

@@ -41,8 +41,8 @@ define i1 @movmskps_allof_bitcast_v2f64(<2 x double> %a0) {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    xorpd %xmm1, %xmm1
 ; SSE-NEXT:    cmpeqpd %xmm0, %xmm1
-; SSE-NEXT:    movmskps %xmm1, %eax
-; SSE-NEXT:    cmpl $15, %eax
+; SSE-NEXT:    movmskpd %xmm1, %eax
+; SSE-NEXT:    cmpl $3, %eax
 ; SSE-NEXT:    sete %al
 ; SSE-NEXT:    retq
 ;
@@ -50,8 +50,8 @@ define i1 @movmskps_allof_bitcast_v2f64(<2 x double> %a0) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vcmpeqpd %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vmovmskps %xmm0, %eax
-; AVX-NEXT:    cmpl $15, %eax
+; AVX-NEXT:    vmovmskpd %xmm0, %eax
+; AVX-NEXT:    cmpl $3, %eax
 ; AVX-NEXT:    sete %al
 ; AVX-NEXT:    retq
   %1 = fcmp oeq <2 x double> zeroinitializer, %a0
@@ -100,26 +100,22 @@ define i1 @pmovmskb_allof_bitcast_v2i64(<2 x i64> %a0) {
 ; SSE2-NEXT:    pxor %xmm1, %xmm1
 ; SSE2-NEXT:    pcmpgtd %xmm0, %xmm1
 ; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE2-NEXT:    pmovmskb %xmm0, %eax
-; SSE2-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; SSE2-NEXT:    movmskps %xmm0, %eax
+; SSE2-NEXT:    cmpl $15, %eax
 ; SSE2-NEXT:    sete %al
 ; SSE2-NEXT:    retq
 ;
 ; SSE42-LABEL: pmovmskb_allof_bitcast_v2i64:
 ; SSE42:       # %bb.0:
-; SSE42-NEXT:    pxor %xmm1, %xmm1
-; SSE42-NEXT:    pcmpgtq %xmm0, %xmm1
-; SSE42-NEXT:    pmovmskb %xmm1, %eax
-; SSE42-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; SSE42-NEXT:    movmskpd %xmm0, %eax
+; SSE42-NEXT:    cmpl $3, %eax
 ; SSE42-NEXT:    sete %al
 ; SSE42-NEXT:    retq
 ;
 ; AVX-LABEL: pmovmskb_allof_bitcast_v2i64:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX-NEXT:    vpcmpgtq %xmm0, %xmm1, %xmm0
-; AVX-NEXT:    vpmovmskb %xmm0, %eax
-; AVX-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; AVX-NEXT:    vmovmskpd %xmm0, %eax
+; AVX-NEXT:    cmpl $3, %eax
 ; AVX-NEXT:    sete %al
 ; AVX-NEXT:    retq
   %1 = icmp sgt <2 x i64> zeroinitializer, %a0
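The SSE42/AVX change above is worth spelling out: "every i64 lane is negative" is purely a sign-bit test, so with this combine the pxor/pcmpgtq compare disappears and movmskpd reads the two sign bits directly. A hand-written analogue (an assumed illustration, not the test itself):

    #include <immintrin.h>

    // All-of "lane < 0" over <2 x i64>: movmskpd extracts bits 63 and
    // 127, which are exactly the two i64 sign bits, so no compare is
    // needed before the mask test.
    bool allof_v2i64_negative(__m128i a0) {
      return _mm_movemask_pd(_mm_castsi128_pd(a0)) == 0x3;
    }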
@@ -161,8 +157,8 @@ define i1 @pmovmskb_allof_bitcast_v4f32(<4 x float> %a0) {
 ; SSE:       # %bb.0:
 ; SSE-NEXT:    xorps %xmm1, %xmm1
 ; SSE-NEXT:    cmpeqps %xmm0, %xmm1
-; SSE-NEXT:    pmovmskb %xmm1, %eax
-; SSE-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; SSE-NEXT:    movmskps %xmm1, %eax
+; SSE-NEXT:    cmpl $15, %eax
 ; SSE-NEXT:    sete %al
 ; SSE-NEXT:    retq
 ;
@@ -170,8 +166,8 @@ define i1 @pmovmskb_allof_bitcast_v4f32(<4 x float> %a0) {
 ; AVX:       # %bb.0:
 ; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; AVX-NEXT:    vcmpeqps %xmm1, %xmm0, %xmm0
-; AVX-NEXT:    vpmovmskb %xmm0, %eax
-; AVX-NEXT:    cmpl $65535, %eax # imm = 0xFFFF
+; AVX-NEXT:    vmovmskps %xmm0, %eax
+; AVX-NEXT:    cmpl $15, %eax
 ; AVX-NEXT:    sete %al
 ; AVX-NEXT:    retq
   %1 = fcmp oeq <4 x float> %a0, zeroinitializer