From 8bc531d349fe04bf61f5693b7d1554d94486395f Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev@redking.me.uk>
Date: Fri, 11 Nov 2016 11:11:40 +0000
Subject: [PATCH] [X86] Add knownbits vector UREM/SREM tests

In preparation for demandedelts support

llvm-svn: 286577
---
 llvm/test/CodeGen/X86/known-bits-vector.ll | 116 +++++++++++++++++++++
 1 file changed, 116 insertions(+)

diff --git a/llvm/test/CodeGen/X86/known-bits-vector.ll b/llvm/test/CodeGen/X86/known-bits-vector.ll
index a1efb9b176e1..cb50d5f9611c 100644
--- a/llvm/test/CodeGen/X86/known-bits-vector.ll
+++ b/llvm/test/CodeGen/X86/known-bits-vector.ll
@@ -255,3 +255,119 @@ define <4 x i32> @knownbits_mask_udiv_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1)
   %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
   ret <4 x i32> %4
 }
+
+define <4 x i32> @knownbits_urem_lshr(<4 x i32> %a0) nounwind {
+; X32-LABEL: knownbits_urem_lshr:
+; X32:       # BB#0:
+; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_urem_lshr:
+; X64:       # BB#0:
+; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %1 = urem <4 x i32> %a0, <i32 16, i32 16, i32 16, i32 16>
+  %2 = lshr <4 x i32> %1, <i32 22, i32 22, i32 22, i32 22>
+  ret <4 x i32> %2
+}
+
+define <4 x i32> @knownbits_mask_urem_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
+; X32-LABEL: knownbits_mask_urem_shuffle_lshr:
+; X32:       # BB#0:
+; X32-NEXT:    pushl %esi
+; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
+; X32-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; X32-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; X32-NEXT:    vpextrd $1, %xmm0, %eax
+; X32-NEXT:    vpextrd $1, %xmm1, %ecx
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    divl %ecx
+; X32-NEXT:    movl %edx, %ecx
+; X32-NEXT:    vmovd %xmm0, %eax
+; X32-NEXT:    vmovd %xmm1, %esi
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    divl %esi
+; X32-NEXT:    vmovd %edx, %xmm2
+; X32-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; X32-NEXT:    vpextrd $2, %xmm0, %eax
+; X32-NEXT:    vpextrd $2, %xmm1, %ecx
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    divl %ecx
+; X32-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
+; X32-NEXT:    vpextrd $3, %xmm0, %eax
+; X32-NEXT:    vpextrd $3, %xmm1, %ecx
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    divl %ecx
+; X32-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X32-NEXT:    vpsrld $22, %xmm0, %xmm0
+; X32-NEXT:    popl %esi
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_mask_urem_shuffle_lshr:
+; X64:       # BB#0:
+; X64-NEXT:    vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
+; X64-NEXT:    vpand %xmm2, %xmm0, %xmm0
+; X64-NEXT:    vpand %xmm2, %xmm1, %xmm1
+; X64-NEXT:    vpextrd $1, %xmm0, %eax
+; X64-NEXT:    vpextrd $1, %xmm1, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    divl %ecx
+; X64-NEXT:    movl %edx, %ecx
+; X64-NEXT:    vmovd %xmm0, %eax
+; X64-NEXT:    vmovd %xmm1, %esi
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    divl %esi
+; X64-NEXT:    vmovd %edx, %xmm2
+; X64-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
+; X64-NEXT:    vpextrd $2, %xmm0, %eax
+; X64-NEXT:    vpextrd $2, %xmm1, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    divl %ecx
+; X64-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
+; X64-NEXT:    vpextrd $3, %xmm0, %eax
+; X64-NEXT:    vpextrd $3, %xmm1, %ecx
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    divl %ecx
+; X64-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X64-NEXT:    vpsrld $22, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
+  %2 = and <4 x i32> %a1, <i32 32767, i32 -1, i32 -1, i32 32767>
+  %3 = urem <4 x i32> %1, %2
+  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
+  %5 = lshr <4 x i32> %4, <i32 22, i32 22, i32 22, i32 22>
+  ret <4 x i32> %5
+}
+
+define <4 x i32> @knownbits_mask_srem_shuffle_lshr(<4 x i32> %a0) nounwind {
+; X32-LABEL: knownbits_mask_srem_shuffle_lshr:
+; X32:       # BB#0:
+; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT:    vpsrad $31, %xmm0, %xmm1
+; X32-NEXT:    vpsrld $28, %xmm1, %xmm1
+; X32-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm1, %xmm1
+; X32-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X32-NEXT:    vpsrld $22, %xmm0, %xmm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: knownbits_mask_srem_shuffle_lshr:
+; X64:       # BB#0:
+; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpsrad $31, %xmm0, %xmm1
+; X64-NEXT:    vpsrld $28, %xmm1, %xmm1
+; X64-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
+; X64-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
+; X64-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
+; X64-NEXT:    vpsrld $22, %xmm0, %xmm0
+; X64-NEXT:    retq
+  %1 = and <4 x i32> %a0, <i32 -32768, i32 -1, i32 -1, i32 -32768>
+  %2 = srem <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
+  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
+  %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
+  ret <4 x i32> %4
+}