llvm-project/llvm/test/CodeGen/X86/haddsub-3.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=SSE,SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
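
; The CHECK lines below were generated by update_llc_test_checks.py (see the
; NOTE above). A typical way to regenerate them after a backend change,
; assuming a local build tree named "build" (the path and flag are
; illustrative), is:
;   llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
;     llvm/test/CodeGen/X86/haddsub-3.ll
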
define float @pr26491(<4 x float> %a0) {
; SSE2-LABEL: pr26491:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[3,3]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: pr26491:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSSE3-NEXT: addps %xmm0, %xmm1
; SSSE3-NEXT: movaps %xmm1, %xmm0
; SSSE3-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSSE3-NEXT: addss %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: pr26491:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
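; The IR below adds the <1,1,3,3> shuffle of %a0 back to %a0 and then sums
; elements 0 and 2 of the result, i.e. it computes
; (%a0[0] + %a0[1]) + (%a0[2] + %a0[3]); all targets lower it with explicit
; shuffles and adds rather than haddps.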
%1 = shufflevector <4 x float> %a0, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3>
%2 = fadd <4 x float> %1, %a0
%3 = extractelement <4 x float> %2, i32 2
%4 = extractelement <4 x float> %2, i32 0
%5 = fadd float %3, %4
ret float %5
}

; When simplifying away a splat (broadcast), the hop type must match the shuffle type.
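; In this test the hop is the v2f64 haddpd created while lowering the u64 ->
; f64 conversion, while the splat feeding the fdiv is v4f64; as the AVX CHECK
; lines show, the combine keeps the 128-bit haddpd and rebuilds the splat with
; vinsertf128 (AVX1) or vbroadcastsd (AVX2) instead of folding the broadcast
; into a wider hop.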
define <4 x double> @PR41414(i64 %x, <4 x double> %y) {
; SSE2-LABEL: PR41414:
; SSE2: # %bb.0:
; SSE2-NEXT: movq %rdi, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
; SSE2-NEXT: subpd {{.*}}(%rip), %xmm2
; SSE2-NEXT: movapd %xmm2, %xmm3
; SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm2[1]
; SSE2-NEXT: addpd %xmm2, %xmm3
; SSE2-NEXT: unpcklpd {{.*#+}} xmm3 = xmm3[0,0]
; SSE2-NEXT: divpd %xmm3, %xmm1
; SSE2-NEXT: divpd %xmm3, %xmm0
; SSE2-NEXT: xorpd %xmm2, %xmm2
; SSE2-NEXT: addpd %xmm2, %xmm0
; SSE2-NEXT: addpd %xmm2, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR41414:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movq %rdi, %xmm2
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1]
; SSSE3-NEXT: subpd {{.*}}(%rip), %xmm2
; SSSE3-NEXT: haddpd %xmm2, %xmm2
; SSSE3-NEXT: divpd %xmm2, %xmm1
; SSSE3-NEXT: divpd %xmm2, %xmm0
; SSSE3-NEXT: xorpd %xmm2, %xmm2
; SSSE3-NEXT: addpd %xmm2, %xmm0
; SSSE3-NEXT: addpd %xmm2, %xmm1
; SSSE3-NEXT: retq
;
; AVX1-LABEL: PR41414:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovq %rdi, %xmm1
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; AVX1-NEXT: vsubpd {{.*}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX1-NEXT: vdivpd %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR41414:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovq %rdi, %xmm1
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; AVX2-NEXT: vsubpd {{.*}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vbroadcastsd %xmm1, %ymm1
; AVX2-NEXT: vdivpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
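; The IR converts %x from i64 to double, splats the result across
; <4 x double>, divides %y by the splat, and adds zero. The punpckldq/subpd
; sequence in the CHECK lines is the usual magic-constant lowering of the
; u64 -> f64 convert, and adding its two halves (haddpd, or unpckhpd+addpd on
; SSE2) forms the scalar value that is then broadcast.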
%conv = uitofp i64 %x to double
%t0 = insertelement <4 x double> undef, double %conv, i32 0
%t1 = shufflevector <4 x double> %t0, <4 x double> undef, <4 x i32> zeroinitializer
%t2 = fdiv <4 x double> %y, %t1
%t3 = fadd <4 x double> zeroinitializer, %t2
ret <4 x double> %t3
}