From 9872cfc5b1774a9d0ab777a3c905013619db5c32 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev@redking.me.uk>
Date: Sun, 21 Feb 2021 21:19:39 +0000
Subject: [PATCH] [X86] Add 'sub C1, (xor X, C2) -> add (xor X, ~C2), C1+1' tests

This is also in sub.ll but that's for a specific i686 pattern - this adds
x86_64 and vector tests
---
 llvm/test/CodeGen/X86/combine-sub.ll | 50 ++++++++++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/llvm/test/CodeGen/X86/combine-sub.ll b/llvm/test/CodeGen/X86/combine-sub.ll
index 935926cf50b2..f16768b87b0e 100644
--- a/llvm/test/CodeGen/X86/combine-sub.ll
+++ b/llvm/test/CodeGen/X86/combine-sub.ll
@@ -228,3 +228,53 @@ define <4 x i32> @combine_vec_sub_sextinreg(<4 x i32> %x, <4 x i32> %y) {
   %3 = sub <4 x i32> %x, %2
   ret <4 x i32> %3
 }
+
+; sub C1, (xor X, C2) -> add (xor X, ~C2), C1+1
+define i32 @combine_sub_xor_consts(i32 %x) {
+; CHECK-LABEL: combine_sub_xor_consts:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    # kill: def $edi killed $edi def $rdi
+; CHECK-NEXT:    xorl $-32, %edi
+; CHECK-NEXT:    leal 33(%rdi), %eax
+; CHECK-NEXT:    retq
+  %xor = xor i32 %x, 31
+  %sub = sub i32 32, %xor
+  ret i32 %sub
+}
+
+define <4 x i32> @combine_vec_sub_xor_consts(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_sub_xor_consts:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pxor {{.*}}(%rip), %xmm0
+; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [1,2,3,4]
+; SSE-NEXT:    psubd %xmm0, %xmm1
+; SSE-NEXT:    movdqa %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_sub_xor_consts:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,2,3,4]
+; AVX-NEXT:    vpsubd %xmm0, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %xor = xor <4 x i32> %x, <i32 28, i32 29, i32 30, i32 31>
+  %sub = sub <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %xor
+  ret <4 x i32> %sub
+}
+
+define <4 x i32> @combine_vec_neg_xor_consts(<4 x i32> %x) {
+; SSE-LABEL: combine_vec_neg_xor_consts:
+; SSE:       # %bb.0:
+; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
+; SSE-NEXT:    psubd %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_vec_neg_xor_consts:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %xor = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %sub = sub <4 x i32> zeroinitializer, %xor
+  ret <4 x i32> %sub
+}
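
Note on the identity the tests above pin down: in two's complement, ~a == -a - 1
and X ^ ~C2 == ~(X ^ C2), so (X ^ ~C2) + (C1+1) == -(X ^ C2) - 1 + C1 + 1
== C1 - (X ^ C2). For the scalar test, C1=32 and C2=31: ~31 == -32 explains the
'xorl $-32, %edi', and C1+1 == 33 explains the 'leal 33(%rdi), %eax'. The
combine_vec_neg_xor_consts case is the C1=0, C2=-1 instance: 0 - (X ^ -1) == X + 1,
which the checked asm materializes as a psubd of an all-ones register (x - (-1)).
The following is a minimal standalone C++ sketch of that identity, not part of
the patch; the function names sub_form/add_form and the exhaustive-ish test
range are invented for illustration.

    #include <cassert>
    #include <cstdint>

    // sub C1, (xor X, C2) with C1=32, C2=31, as in combine_sub_xor_consts.
    static int32_t sub_form(int32_t x) { return 32 - (x ^ 31); }

    // add (xor X, ~C2), C1+1 -- ~31 == -32 and 32+1 == 33, matching the
    // 'xorl $-32' / 'leal 33' pair in the CHECK lines of the patch.
    static int32_t add_form(int32_t x) { return (x ^ ~31) + 33; }

    int main() {
      // Spot-check the identity over a window of values (hypothetical range;
      // the algebra above holds for all int32_t inputs).
      for (int64_t x = -(1 << 20); x <= (1 << 20); ++x)
        assert(sub_form(static_cast<int32_t>(x)) ==
               add_form(static_cast<int32_t>(x)));
      return 0;
    }

The payoff of the add form is visible in the scalar CHECK lines: the xor and the
constant add are independent instructions, so the add can be folded into an lea
rather than keeping a subtract whose first operand is a constant.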