[X86] Add AVX partial dependency tests as noted on D67363
llvm-svn: 371525
commit 937ca68157
parent 3b0b3def86
@@ -1,5 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX

 ; PR31455 - https://bugs.llvm.org/show_bug.cgi?id=31455
 ; We have to assume that errno can be set, so we have to make a libcall in that case.
@@ -9,44 +10,74 @@
 ; for that), but we're checking the final asm to make sure that comes out as expected too.

 define float @f(float %val) nounwind {
-; CHECK-LABEL: f:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorps %xmm1, %xmm1
-; CHECK-NEXT: ucomiss %xmm1, %xmm0
-; CHECK-NEXT: jb .LBB0_2
-; CHECK-NEXT: # %bb.1: # %.split
-; CHECK-NEXT: sqrtss %xmm0, %xmm0
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB0_2: # %call.sqrt
-; CHECK-NEXT: jmp sqrtf # TAILCALL
+; SSE-LABEL: f:
+; SSE: # %bb.0:
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: ucomiss %xmm1, %xmm0
+; SSE-NEXT: jb .LBB0_2
+; SSE-NEXT: # %bb.1: # %.split
+; SSE-NEXT: sqrtss %xmm0, %xmm0
+; SSE-NEXT: retq
+; SSE-NEXT: .LBB0_2: # %call.sqrt
+; SSE-NEXT: jmp sqrtf # TAILCALL
+;
+; AVX-LABEL: f:
+; AVX: # %bb.0:
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vucomiss %xmm1, %xmm0
+; AVX-NEXT: jb .LBB0_2
+; AVX-NEXT: # %bb.1: # %.split
+; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+; AVX-NEXT: .LBB0_2: # %call.sqrt
+; AVX-NEXT: jmp sqrtf # TAILCALL
   %res = tail call float @sqrtf(float %val)
   ret float %res
 }

 define double @d(double %val) nounwind {
-; CHECK-LABEL: d:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorps %xmm1, %xmm1
-; CHECK-NEXT: ucomisd %xmm1, %xmm0
-; CHECK-NEXT: jb .LBB1_2
-; CHECK-NEXT: # %bb.1: # %.split
-; CHECK-NEXT: sqrtsd %xmm0, %xmm0
-; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB1_2: # %call.sqrt
-; CHECK-NEXT: jmp sqrt # TAILCALL
+; SSE-LABEL: d:
+; SSE: # %bb.0:
+; SSE-NEXT: xorps %xmm1, %xmm1
+; SSE-NEXT: ucomisd %xmm1, %xmm0
+; SSE-NEXT: jb .LBB1_2
+; SSE-NEXT: # %bb.1: # %.split
+; SSE-NEXT: sqrtsd %xmm0, %xmm0
+; SSE-NEXT: retq
+; SSE-NEXT: .LBB1_2: # %call.sqrt
+; SSE-NEXT: jmp sqrt # TAILCALL
+;
+; AVX-LABEL: d:
+; AVX: # %bb.0:
+; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vucomisd %xmm1, %xmm0
+; AVX-NEXT: jb .LBB1_2
+; AVX-NEXT: # %bb.1: # %.split
+; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+; AVX-NEXT: .LBB1_2: # %call.sqrt
+; AVX-NEXT: jmp sqrt # TAILCALL
   %res = tail call double @sqrt(double %val)
   ret double %res
 }

 define double @minsize(double %x, double %y) minsize {
-; CHECK-LABEL: minsize:
-; CHECK: # %bb.0:
-; CHECK-NEXT: mulsd %xmm0, %xmm0
-; CHECK-NEXT: mulsd %xmm1, %xmm1
-; CHECK-NEXT: addsd %xmm0, %xmm1
-; CHECK-NEXT: xorps %xmm0, %xmm0
-; CHECK-NEXT: sqrtsd %xmm1, %xmm0
-; CHECK-NEXT: retq
+; SSE-LABEL: minsize:
+; SSE: # %bb.0:
+; SSE-NEXT: mulsd %xmm0, %xmm0
+; SSE-NEXT: mulsd %xmm1, %xmm1
+; SSE-NEXT: addsd %xmm0, %xmm1
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: sqrtsd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: minsize:
+; AVX: # %bb.0:
+; AVX-NEXT: vmulsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmulsd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
   %t3 = fmul fast double %x, %x
   %t4 = fmul fast double %y, %y
   %t5 = fadd fast double %t3, %t4
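Note (not part of the commit): the new AVX RUN line matters because the AVX scalar forms checked above (vsqrtss/vsqrtsd) copy their upper elements from the first source register, so the register chosen there can carry the kind of partial (false) dependency discussed on D67363. As a minimal standalone sketch of the related lowering, the snippet below uses the llvm.sqrt intrinsic, which never sets errno, so it compiles straight to sqrtss (SSE) or vsqrtss (AVX) without the ucomiss guard and sqrtf tail call seen in the test; the function name @sqrt_direct is illustrative only and does not appear in the commit.

; Sketch only; try: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < sketch.ll
declare float @llvm.sqrt.f32(float)

define float @sqrt_direct(float %val) nounwind {
  ; llvm.sqrt has no errno side effect, so no libcall fallback is generated.
  %res = call float @llvm.sqrt.f32(float %val)
  ret float %res
}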