[X86][SSE] Add uniform/non-uniform exact sdiv vector tests covering all paths

Regenerated the test checks and verified the results on 64-bit (AVX2) as well.

llvm-svn: 338729
This commit is contained in:
Simon Pilgrim 2018-08-02 15:34:51 +00:00
parent 67647bcfbe
commit ef494e1722
1 changed file with 110 additions and 14 deletions

View File

@ -1,29 +1,125 @@
; RUN: llc -mtriple=i686-- -mattr=+sse2 < %s | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
; Scalar exact sdiv by an ODD constant (25): lowers to a single multiply by
; the multiplicative inverse of 25 modulo 2^32 (0xC28F5C29 == -1030792151,
; since 25 * 0xC28F5C29 == 1 mod 2^32). No shift is needed for odd divisors.
define i32 @test1(i32 %x) {
; X86-LABEL: test1:
; X86: # %bb.0:
; X86-NEXT: imull $-1030792151, {{[0-9]+}}(%esp), %eax # imm = 0xC28F5C29
; X86-NEXT: retl
;
; X64-LABEL: test1:
; X64: # %bb.0:
; X64-NEXT: imull $-1030792151, %edi, %eax # imm = 0xC28F5C29
; X64-NEXT: retq
%div = sdiv exact i32 %x, 25
ret i32 %div
; NOTE(review): the lines below look like stale pre-regeneration assertions
; left in this diff view (the RUN lines now use X86/X64 prefixes) — verify
; against the committed file before relying on them.
; CHECK-LABEL: test1:
; CHECK: imull $-1030792151, 4(%esp)
; CHECK-NEXT: ret
}
; Scalar exact sdiv by an EVEN constant (24 = 8 * 3): lowers to an arithmetic
; shift right by 3 (divide out the power of two — safe because the division is
; 'exact') followed by a multiply by the inverse of the odd factor 3
; (0xAAAAAAAB == -1431655765, since 3 * 0xAAAAAAAB == 1 mod 2^32).
define i32 @test2(i32 %x) {
; X86-LABEL: test2:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: sarl $3, %eax
; X86-NEXT: imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
; X86-NEXT: retl
;
; X64-LABEL: test2:
; X64: # %bb.0:
; X64-NEXT: sarl $3, %edi
; X64-NEXT: imull $-1431655765, %edi, %eax # imm = 0xAAAAAAAB
; X64-NEXT: retq
%div = sdiv exact i32 %x, 24
ret i32 %div
; NOTE(review): stale pre-regeneration assertions from the diff view — the
; RUN lines now use X86/X64 prefixes; confirm against the committed file.
; CHECK-LABEL: test2:
; CHECK: sarl $3
; CHECK-NEXT: imull $-1431655765
; CHECK-NEXT: ret
}
; UNIFORM vector exact sdiv by an even splat (24): same shift-then-multiply
; lowering as the scalar case, vectorized. SSE2 has no 32-bit vector multiply,
; so the multiply by the splat inverse of 3 (2863311531 == 0xAAAAAAAB) is
; emulated with two pmuludq ops plus shuffles; AVX2 uses a single vpmulld
; against a broadcast constant.
define <4 x i32> @test3(<4 x i32> %x) {
; X86-LABEL: test3:
; X86: # %bb.0:
; X86-NEXT: psrad $3, %xmm0
; X86-NEXT: movdqa {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: pmuludq %xmm1, %xmm2
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
;
; X64-LABEL: test3:
; X64: # %bb.0:
; X64-NEXT: vpsrad $3, %xmm0, %xmm0
; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [2863311531,2863311531,2863311531,2863311531]
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 24, i32 24>
ret <4 x i32> %div
; NOTE(review): stale pre-regeneration assertions from the diff view — the
; RUN lines now use X86/X64 prefixes; confirm against the committed file.
; CHECK-LABEL: test3:
; CHECK: psrad $3,
; CHECK: pmuludq
; CHECK: pmuludq
; CHECK-NOT: psrad
; CHECK: ret
}
; UNIFORM vector exact sdiv by an odd splat (25): multiply-only lowering with
; the splat inverse of 25 (3264175145 == 0xC28F5C29), no shift — the vector
; counterpart of test1. SSE2 again emulates the 32-bit multiply via pmuludq
; pairs; AVX2 uses a single vpmulld against a broadcast constant.
define <4 x i32> @test4(<4 x i32> %x) {
; X86-LABEL: test4:
; X86: # %bb.0:
; X86-NEXT: movdqa {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; X86-NEXT: pmuludq %xmm1, %xmm0
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; X86-NEXT: pmuludq %xmm1, %xmm2
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; X86-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-NEXT: retl
;
; X64-LABEL: test4:
; X64: # %bb.0:
; X64-NEXT: vpbroadcastd {{.*#+}} xmm1 = [3264175145,3264175145,3264175145,3264175145]
; X64-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-NEXT: retq
%div = sdiv exact <4 x i32> %x, <i32 25, i32 25, i32 25, i32 25>
ret <4 x i32> %div
}
; NON-UNIFORM vector exact sdiv <24, 24, 25, 25>: mixed even/odd divisors, so
; (at this commit) the lowering scalarizes — each lane is extracted, the lanes
; dividing by 24 get sarl $3 + imull by 0xAAAAAAAB (inverse of 3), the lanes
; dividing by 25 get a bare imull by 0xC28F5C29 (inverse of 25), and the
; results are reassembled (punpck* on SSE2, vpinsrd on AVX2).
define <4 x i32> @test5(<4 x i32> %x) {
; X86-LABEL: test5:
; X86: # %bb.0:
; X86-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
; X86-NEXT: movd %xmm1, %eax
; X86-NEXT: imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
; X86-NEXT: movd %eax, %xmm1
; X86-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; X86-NEXT: movd %xmm2, %eax
; X86-NEXT: imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
; X86-NEXT: movd %eax, %xmm2
; X86-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X86-NEXT: movd %xmm0, %eax
; X86-NEXT: sarl $3, %eax
; X86-NEXT: imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
; X86-NEXT: movd %eax, %xmm1
; X86-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X86-NEXT: movd %xmm0, %eax
; X86-NEXT: sarl $3, %eax
; X86-NEXT: imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
; X86-NEXT: movd %eax, %xmm0
; X86-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; X86-NEXT: movdqa %xmm1, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: test5:
; X64: # %bb.0:
; X64-NEXT: vpextrd $1, %xmm0, %eax
; X64-NEXT: sarl $3, %eax
; X64-NEXT: imull $-1431655765, %eax, %eax # imm = 0xAAAAAAAB
; X64-NEXT: vmovd %xmm0, %ecx
; X64-NEXT: sarl $3, %ecx
; X64-NEXT: imull $-1431655765, %ecx, %ecx # imm = 0xAAAAAAAB
; X64-NEXT: vmovd %ecx, %xmm1
; X64-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
; X64-NEXT: vpextrd $2, %xmm0, %eax
; X64-NEXT: imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
; X64-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
; X64-NEXT: vpextrd $3, %xmm0, %eax
; X64-NEXT: imull $-1030792151, %eax, %eax # imm = 0xC28F5C29
; X64-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
; X64-NEXT: retq
%div = sdiv exact <4 x i32> %x, <i32 24, i32 24, i32 25, i32 25>
ret <4 x i32> %div
}