[X86][SSE] Regenerate SSE copysign tests

llvm-svn: 276565
This commit is contained in:
Simon Pilgrim 2016-07-24 15:17:50 +00:00
parent 351ed42795
commit 30a7cc2e1f
1 changed file with 102 additions and 82 deletions

View File

@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X32
 ; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=X64
@@ -5,41 +6,47 @@
 ; Library Functions
 ;
-define float @tst1(float %a, float %b) {
-; X32-LABEL: @tst1
-; X32: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: movss %xmm1, 4(%esp)
-; X32-NEXT: movss %xmm0, (%esp)
-; X32-NEXT: calll copysignf
-; X32-NEXT: addl $8, %esp
-; X32-NEXT: retl
+define float @tst1(float %a, float %b) nounwind {
+; X32-LABEL: tst1:
+; X32: # BB#0:
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X32-NEXT: movss %xmm0, (%esp)
+; X32-NEXT: calll copysignf
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: retl
 ;
-; X64-LABEL: @tst1
-; X64: movaps %xmm0, %xmm2
-; X64-NEXT: movaps %xmm1, %xmm0
-; X64-NEXT: movaps %xmm2, %xmm1
-; X64-NEXT: jmp copysignf
+; X64-LABEL: tst1:
+; X64: # BB#0:
+; X64-NEXT: movaps %xmm0, %xmm2
+; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: movaps %xmm2, %xmm1
+; X64-NEXT: jmp copysignf # TAILCALL
 %tmp = tail call float @copysignf( float %b, float %a )
 ret float %tmp
 }
-define double @tst2(double %a, float %b, float %c) {
-; X32-LABEL: @tst2
-; X32: movsd {{.*#+}} xmm0 = mem[0],zero
-; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: addss 32(%esp), %xmm1
-; X32-NEXT: cvtss2sd %xmm1, %xmm1
-; X32-NEXT: movsd %xmm0, (%esp)
-; X32-NEXT: movsd %xmm1, 8(%esp)
-; X32-NEXT: calll copysign
-; X32-NEXT: addl $16, %esp
-; X32-NEXT: retl
+define double @tst2(double %a, float %b, float %c) nounwind {
+; X32-LABEL: tst2:
+; X32: # BB#0:
+; X32-NEXT: subl $16, %esp
+; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: addss {{[0-9]+}}(%esp), %xmm1
+; X32-NEXT: cvtss2sd %xmm1, %xmm1
+; X32-NEXT: movsd %xmm0, (%esp)
+; X32-NEXT: movsd %xmm1, {{[0-9]+}}(%esp)
+; X32-NEXT: calll copysign
+; X32-NEXT: addl $16, %esp
+; X32-NEXT: retl
 ;
-; X64-LABEL: @tst2
-; X64: addss %xmm2, %xmm1
-; X64-NEXT: cvtss2sd %xmm1, %xmm1
-; X64-NEXT: jmp copysign
+; X64-LABEL: tst2:
+; X64: # BB#0:
+; X64-NEXT: addss %xmm2, %xmm1
+; X64-NEXT: cvtss2sd %xmm1, %xmm1
+; X64-NEXT: jmp copysign # TAILCALL
 %tmp1 = fadd float %b, %c
 %tmp2 = fpext float %tmp1 to double
 %tmp = tail call double @copysign( double %a, double %tmp2 )
@@ -53,77 +60,90 @@ declare double @copysign(double, double)
 ; LLVM Intrinsic
 ;
-define float @int1(float %a, float %b) {
-; X32-LABEL: @int1
-; X32: movss 8(%esp), %xmm0 {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: andps .LCPI2_0, %xmm0
-; X32-NEXT: movss 12(%esp), %xmm1 {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X32-NEXT: andps .LCPI2_1, %xmm1
-; X32-NEXT: orps %xmm0, %xmm1
-; X32-NEXT: movss %xmm1, (%esp)
-; X32-NEXT: flds (%esp)
-; X32-NEXT: popl %eax
-; X32-NEXT: retl
+define float @int1(float %a, float %b) nounwind {
+; X32-LABEL: int1:
+; X32: # BB#0:
+; X32-NEXT: pushl %eax
+; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: andps {{\.LCPI.*}}, %xmm0
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: andps {{\.LCPI.*}}, %xmm1
+; X32-NEXT: orps %xmm0, %xmm1
+; X32-NEXT: movss %xmm1, (%esp)
+; X32-NEXT: flds (%esp)
+; X32-NEXT: popl %eax
+; X32-NEXT: retl
 ;
-; X64-LABEL: @int1
-; X64: andps .LCPI2_0(%rip), %xmm0
-; X64-NEXT: andps .LCPI2_1(%rip), %xmm1
-; X64-NEXT: orps %xmm1, %xmm0
-; X64-NEXT: retq
+; X64-LABEL: int1:
+; X64: # BB#0:
+; X64-NEXT: andps {{.*}}(%rip), %xmm0
+; X64-NEXT: andps {{.*}}(%rip), %xmm1
+; X64-NEXT: orps %xmm1, %xmm0
+; X64-NEXT: retq
 %tmp = tail call float @llvm.copysign.f32( float %b, float %a )
 ret float %tmp
 }
-define double @int2(double %a, float %b, float %c) {
-; X32-LABEL: @int2
-; X32: movss 16(%ebp), %xmm0 {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-NEXT: addss 20(%ebp), %xmm0
-; X32-NEXT: movsd 8(%ebp), %xmm1 {{.*#+}} xmm1 = mem[0],zero
-; X32-NEXT: andpd .LCPI3_0, %xmm1
-; X32-NEXT: cvtss2sd %xmm0, %xmm0
-; X32-NEXT: andpd .LCPI3_1, %xmm0
-; X32-NEXT: orpd %xmm1, %xmm0
-; X32-NEXT: movlpd %xmm0, (%esp)
-; X32-NEXT: fldl (%esp)
-; X32-NEXT: movl %ebp, %esp
-; X32-NEXT: popl %ebp
-; X32-NEXT: retl
+define double @int2(double %a, float %b, float %c) nounwind {
+; X32-LABEL: int2:
+; X32: # BB#0:
+; X32-NEXT: pushl %ebp
+; X32-NEXT: movl %esp, %ebp
+; X32-NEXT: andl $-8, %esp
+; X32-NEXT: subl $8, %esp
+; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-NEXT: addss 20(%ebp), %xmm0
+; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X32-NEXT: andpd {{\.LCPI.*}}, %xmm1
+; X32-NEXT: cvtss2sd %xmm0, %xmm0
+; X32-NEXT: andpd {{\.LCPI.*}}, %xmm0
+; X32-NEXT: orpd %xmm1, %xmm0
+; X32-NEXT: movlpd %xmm0, (%esp)
+; X32-NEXT: fldl (%esp)
+; X32-NEXT: movl %ebp, %esp
+; X32-NEXT: popl %ebp
+; X32-NEXT: retl
 ;
-; X64-LABEL: @int2
-; X64: addss %xmm2, %xmm1
-; X64-NEXT: cvtss2sd %xmm1, %xmm1
-; X64-NEXT: andpd .LCPI3_0(%rip), %xmm1
-; X64-NEXT: andpd .LCPI3_1(%rip), %xmm0
-; X64-NEXT: orpd %xmm1, %xmm0
-; X64-NEXT: retq
+; X64-LABEL: int2:
+; X64: # BB#0:
+; X64-NEXT: addss %xmm2, %xmm1
+; X64-NEXT: cvtss2sd %xmm1, %xmm1
+; X64-NEXT: andpd {{.*}}(%rip), %xmm1
+; X64-NEXT: andpd {{.*}}(%rip), %xmm0
+; X64-NEXT: orpd %xmm1, %xmm0
+; X64-NEXT: retq
 %tmp1 = fadd float %b, %c
 %tmp2 = fpext float %tmp1 to double
 %tmp = tail call double @llvm.copysign.f64( double %a, double %tmp2 )
 ret double %tmp
 }
-define float @cst1() {
-; X32-LABEL: @cst1
-; X32: fld1
-; X32-NEXT: fchs
-; X32-NEXT: retl
+define float @cst1() nounwind {
+; X32-LABEL: cst1:
+; X32: # BB#0:
+; X32-NEXT: fld1
+; X32-NEXT: fchs
+; X32-NEXT: retl
 ;
-; X64-LABEL: @cst1
-; X64: movss .LCPI4_0(%rip), %xmm0 {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X64-NEXT: retq
+; X64-LABEL: cst1:
+; X64: # BB#0:
+; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-NEXT: retq
 %tmp = tail call float @llvm.copysign.f32( float 1.0, float -2.0 )
 ret float %tmp
 }
-define double @cst2() {
-; X32-LABEL: @cst2
-; X32: fldz
-; X32-NEXT: fchs
-; X32-NEXT: retl
+define double @cst2() nounwind {
+; X32-LABEL: cst2:
+; X32: # BB#0:
+; X32-NEXT: fldz
+; X32-NEXT: fchs
+; X32-NEXT: retl
 ;
-; X64-LABEL: @cst2
-; X64: movsd .LCPI5_0(%rip), %xmm0 {{.*#+}} xmm0 = mem[0],zero
-; X64-NEXT: retq
+; X64-LABEL: cst2:
+; X64: # BB#0:
+; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-NEXT: retq
 %tmp1 = fadd float -1.0, -1.0
 %tmp2 = fpext float %tmp1 to double
 %tmp = tail call double @llvm.copysign.f64( double 0.0, double %tmp2 )