diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index c64632bdd72b..ceaa70d752f6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -39836,13 +39836,19 @@ static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
     }
   }
 
-  // TODO: TESTZ(X,~Y) == TESTC(Y,X)
-
-  // TESTZ(X,-1) == TESTZ(X,X)
-  // TESTZ(-1,X) == TESTZ(X,X)
   if (CC == X86::COND_E || CC == X86::COND_NE) {
+    // TESTZ(X,~Y) == TESTC(Y,X)
+    if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
+      CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
+      return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
+                         DAG.getBitcast(OpVT, NotOp1), Op0);
+    }
+
+    // TESTZ(-1,X) == TESTZ(X,X)
     if (ISD::isBuildVectorAllOnes(Op0.getNode()))
       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);
+
+    // TESTZ(X,-1) == TESTZ(X,X)
     if (ISD::isBuildVectorAllOnes(Op1.getNode()))
       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
   }
diff --git a/llvm/test/CodeGen/X86/combine-ptest.ll b/llvm/test/CodeGen/X86/combine-ptest.ll
index 59f7053c66a5..e12a4152ac7e 100644
--- a/llvm/test/CodeGen/X86/combine-ptest.ll
+++ b/llvm/test/CodeGen/X86/combine-ptest.ll
@@ -42,10 +42,8 @@ define i32 @ptestz_128_invert1(<2 x i64> %c, <2 x i64> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: ptestz_128_invert1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vpxor %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vptest %xmm1, %xmm0
-; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    vptest %xmm0, %xmm1
+; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    retq
   %t1 = xor <2 x i64> %d, <i64 -1, i64 -1>
   %t2 = call i32 @llvm.x86.sse41.ptestz(<2 x i64> %c, <2 x i64> %t1)
@@ -58,11 +56,8 @@ define i32 @ptestz_256_invert1(<4 x i64> %c, <4 x i64> %d, i32 %a, i32 %b) {
 ; CHECK-LABEL: ptestz_256_invert1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
-; CHECK-NEXT:    vxorps %ymm2, %ymm1, %ymm1
-; CHECK-NEXT:    vptest %ymm1, %ymm0
-; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    vptest %ymm0, %ymm1
+; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %t1 = xor <4 x i64> %d, <i64 -1, i64 -1, i64 -1, i64 -1>
diff --git a/llvm/test/CodeGen/X86/combine-testpd.ll b/llvm/test/CodeGen/X86/combine-testpd.ll
index 968f8b01aeb2..9ae3d80e59cd 100644
--- a/llvm/test/CodeGen/X86/combine-testpd.ll
+++ b/llvm/test/CodeGen/X86/combine-testpd.ll
@@ -46,10 +46,8 @@ define i32 @testpdz_128_invert1(<2 x double> %c, <2 x double> %d, i32 %a, i32 %b
 ; CHECK-LABEL: testpdz_128_invert1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vpxor %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vtestpd %xmm1, %xmm0
-; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    vtestpd %xmm0, %xmm1
+; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    retq
   %t0 = bitcast <2 x double> %d to <2 x i64>
   %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
@@ -64,11 +62,8 @@ define i32 @testpdz_256_invert1(<4 x double> %c, <4 x double> %d, i32 %a, i32 %b
 ; CHECK-LABEL: testpdz_256_invert1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
-; CHECK-NEXT:    vxorps %ymm2, %ymm1, %ymm1
-; CHECK-NEXT:    vtestpd %ymm1, %ymm0
-; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    vtestpd %ymm0, %ymm1
+; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %t0 = bitcast <4 x double> %d to <4 x i64>
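Reviewer note (not part of the patch): the combine rests on the flag semantics of PTEST/VTESTPS/VTESTPD — ZF = ((SRC1 & SRC2) == 0) and CF = ((~SRC1 & SRC2) == 0) per the Intel SDM — so TESTZ(X,~Y), i.e. (X & ~Y) == 0, is exactly TESTC(Y,X), i.e. (~Y & X) == 0. That is also why COND_E (ZF set) flips to COND_B (CF set) and COND_NE to COND_AE, matching the cmovnel -> cmovael changes in the tests. A minimal scalar sketch of the identity (illustrative only; the helper names are mine, not LLVM's):

  #include <cassert>
  #include <cstdint>

  // Scalar model of the two flags a PTEST-family instruction computes.
  static bool testz(uint64_t A, uint64_t B) { return (A & B) == 0; }  // ZF
  static bool testc(uint64_t A, uint64_t B) { return (~A & B) == 0; } // CF

  int main() {
    const uint64_t Vals[] = {0x0, 0xFF, 0xF0F0, ~0ULL};
    for (uint64_t X : Vals)
      for (uint64_t Y : Vals)
        // TESTZ(X,~Y): (X & ~Y) == 0  <=>  (~Y & X) == 0  == TESTC(Y,X).
        assert(testz(X, ~Y) == testc(Y, X));
    return 0;
  }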
diff --git a/llvm/test/CodeGen/X86/combine-testps.ll b/llvm/test/CodeGen/X86/combine-testps.ll
index 135c6f2ae106..f3605441348d 100644
--- a/llvm/test/CodeGen/X86/combine-testps.ll
+++ b/llvm/test/CodeGen/X86/combine-testps.ll
@@ -46,10 +46,8 @@ define i32 @testpsz_128_invert1(<4 x float> %c, <4 x float> %d, i32 %a, i32 %b)
 ; CHECK-LABEL: testpsz_128_invert1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vpxor %xmm2, %xmm1, %xmm1
-; CHECK-NEXT:    vtestps %xmm1, %xmm0
-; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    vtestps %xmm0, %xmm1
+; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    retq
   %t0 = bitcast <4 x float> %d to <2 x i64>
   %t1 = xor <2 x i64> %t0, <i64 -1, i64 -1>
@@ -64,11 +62,8 @@ define i32 @testpsz_256_invert1(<8 x float> %c, <8 x float> %d, i32 %a, i32 %b)
 ; CHECK-LABEL: testpsz_256_invert1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %edi, %eax
-; CHECK-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT:    vcmptrueps %ymm2, %ymm2, %ymm2
-; CHECK-NEXT:    vxorps %ymm2, %ymm1, %ymm1
-; CHECK-NEXT:    vtestps %ymm1, %ymm0
-; CHECK-NEXT:    cmovnel %esi, %eax
+; CHECK-NEXT:    vtestps %ymm0, %ymm1
+; CHECK-NEXT:    cmovael %esi, %eax
 ; CHECK-NEXT:    vzeroupper
 ; CHECK-NEXT:    retq
   %t0 = bitcast <8 x float> %d to <4 x i64>
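Usage-level illustration (assumed source pattern, not taken from the patch; the function name is hypothetical): intrinsics code like the following is the kind of input the combine improves. Before, the explicit NOT forced materializing all-ones (pcmpeqd/vcmptrueps) plus an xor before testing ZF; with the fold the NOT is absorbed into the instruction and the branch reads CF instead.

  #include <immintrin.h>

  // ptestz(C, ~D), i.e. (C & ~D) == 0: true when every set bit of C is
  // also set in D. With the combine this can lower to a single vptest
  // plus a CF-based cmovae, with no all-ones constant or xor emitted.
  int select_if_c_subset_of_d(__m128i C, __m128i D, int A, int B) {
    __m128i NotD = _mm_xor_si128(D, _mm_set1_epi32(-1)); // ~D
    return _mm_testz_si128(C, NotD) ? A : B;
  }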