llvm-project/llvm/test/CodeGen/X86/vector-reduce-fadd.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512VL
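;
; This file checks the x86 lowering of the ordered llvm.experimental.vector.reduce.fadd
; intrinsics. With no fast-math flags on the call, the reduction must preserve the
; order of the scalarized chain, e.g. for <4 x float> it is equivalent to:
;   %r0 = fadd float %acc, %v0
;   %r1 = fadd float %r0, %v1
;   %r2 = fadd float %r1, %v2
;   %r3 = fadd float %r2, %v3
; which is why each element below is shuffled into lane 0 and added with a scalar
; addss/addsd rather than a horizontal or packed add.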
;
; vXf32 (accum)
;
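; These tests pass the start value as an explicit scalar accumulator %a0, so each
; sequence begins with an addss of %a0 and element 0 of the vector.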
define float @test_v2f32(float %a0, <2 x float> %a1) {
; SSE2-LABEL: test_v2f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f32:
; SSE41: # %bb.0:
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2f32:
; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v2f32(float %a0, <2 x float> %a1)
ret float %1
}
define float @test_v4f32(float %a0, <4 x float> %a1) {
; SSE2-LABEL: test_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v4f32(float %a0, <4 x float> %a1)
ret float %1
}
define float @test_v8f32(float %a0, <8 x float> %a1) {
; SSE2-LABEL: test_v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32:
; SSE41: # %bb.0:
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm3
; SSE41-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8f32:
; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v8f32(float %a0, <8 x float> %a1)
ret float %1
}
define float @test_v16f32(float %a0, <16 x float> %a1) {
; SSE2-LABEL: test_v16f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm5, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm5
; SSE2-NEXT: movhlps {{.*#+}} xmm5 = xmm1[1],xmm5[1]
; SSE2-NEXT: addss %xmm5, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm4, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm4[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm4, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm4[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1,2,3]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32:
; SSE41: # %bb.0:
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm5, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm5
; SSE41-NEXT: movhlps {{.*#+}} xmm5 = xmm1[1],xmm5[1]
; SSE41-NEXT: addss %xmm5, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm3, %xmm1
; SSE41-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm4[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm4, %xmm1
; SSE41-NEXT: movhlps {{.*#+}} xmm1 = xmm4[1],xmm1[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1,2,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16f32:
; AVX: # %bb.0:
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm2[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v16f32(float %a0, <16 x float> %a1)
ret float %1
}
;
; vXf32 (zero)
;
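; These tests use a constant +0.0 accumulator. Because the reduction is ordered and
; +0.0 + x is not x when x is -0.0, the zero cannot simply be dropped: each lowering
; materializes zero (xorps/vxorps/vpxor) and adds it to element 0 first.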
define float @test_v2f32_zero(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2f32_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v2f32_zero:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v2f32_zero:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v2f32(float 0.0, <2 x float> %a0)
ret float %1
}
define float @test_v4f32_zero(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; SSE2-NEXT: addss %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm2
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4f32_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v4f32_zero:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512BW-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v4f32_zero:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512VL-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v4f32(float 0.0, <4 x float> %a0)
ret float %1
}
define float @test_v8f32_zero(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: addss %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
; SSE2-NEXT: addss %xmm2, %xmm3
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE2-NEXT: addss %xmm3, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm2, %xmm2
; SSE41-NEXT: addss %xmm0, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm3
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE41-NEXT: addss %xmm3, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8f32_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v8f32_zero:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512BW-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512BW-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v8f32_zero:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512VL-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v8f32(float 0.0, <8 x float> %a0)
ret float %1
}
define float @test_v16f32_zero(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm4, %xmm4
; SSE2-NEXT: addss %xmm0, %xmm4
; SSE2-NEXT: movaps %xmm0, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[2,3]
; SSE2-NEXT: addss %xmm4, %xmm5
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: movhlps {{.*#+}} xmm4 = xmm0[1],xmm4[1]
; SSE2-NEXT: addss %xmm5, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm4, %xmm4
; SSE41-NEXT: addss %xmm0, %xmm4
; SSE41-NEXT: movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm4, %xmm5
; SSE41-NEXT: movaps %xmm0, %xmm4
; SSE41-NEXT: movhlps {{.*#+}} xmm4 = xmm0[1],xmm4[1]
; SSE41-NEXT: addss %xmm5, %xmm4
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm4
; SSE41-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm3, %xmm1
; SSE41-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16f32_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v16f32_zero:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512BW-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512BW-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512BW-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512BW-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512BW-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512BW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512BW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512BW-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v16f32_zero:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512VL-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512VL-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512VL-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512VL-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512VL-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512VL-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512VL-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512VL-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v16f32(float 0.0, <16 x float> %a0)
ret float %1
}
;
; vXf32 (undef)
;
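; These tests use an undef accumulator, which lets the leading add be simplified
; away; the first remaining addss takes a constant-pool operand ({{.*}}(%rip))
; instead of an accumulator register.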
define float @test_v2f32_undef(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT: addss {{.*}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f32_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT: addss {{.*}}(%rip), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2f32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v2f32(float undef, <2 x float> %a0)
ret float %1
}
define float @test_v4f32_undef(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
; SSE2-NEXT: addss {{.*}}(%rip), %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE2-NEXT: addss %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss {{.*}}(%rip), %xmm1
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
; SSE41-NEXT: addss %xmm1, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4f32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v4f32(float undef, <4 x float> %a0)
ret float %1
}
define float @test_v8f32_undef(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
; SSE2-NEXT: addss {{.*}}(%rip), %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
; SSE2-NEXT: addss %xmm2, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: addss {{.*}}(%rip), %xmm2
; SSE41-NEXT: movaps %xmm0, %xmm3
; SSE41-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
; SSE41-NEXT: addss %xmm2, %xmm3
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8f32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v8f32(float undef, <8 x float> %a0)
ret float %1
}
define float @test_v16f32_undef(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[2,3]
; SSE2-NEXT: addss {{.*}}(%rip), %xmm4
; SSE2-NEXT: movaps %xmm0, %xmm5
; SSE2-NEXT: movhlps {{.*#+}} xmm5 = xmm0[1],xmm5[1]
; SSE2-NEXT: addss %xmm4, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE2-NEXT: addss %xmm5, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[2,3]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm3[2,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm3, %xmm1
; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE41-NEXT: addss {{.*}}(%rip), %xmm4
; SSE41-NEXT: movaps %xmm0, %xmm5
; SSE41-NEXT: movhlps {{.*#+}} xmm5 = xmm0[1],xmm5[1]
; SSE41-NEXT: addss %xmm4, %xmm5
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE41-NEXT: addss %xmm5, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: movaps %xmm1, %xmm4
; SSE41-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm2, %xmm1
; SSE41-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm3[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm3, %xmm1
; SSE41-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16f32_undef:
; AVX: # %bb.0:
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss {{.*}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm2[1,0]
; AVX512-NEXT: vaddss %xmm3, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call float @llvm.experimental.vector.reduce.fadd.f32.f32.v16f32(float undef, <16 x float> %a0)
ret float %1
}
;
; vXf64 (accum)
;
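; f64 variants of the accumulator tests: %a0 is combined with each element in order,
; with movhlps/vpermilpd bringing the high double of each 128-bit chunk into lane 0
; before every scalar addsd.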
define double @test_v2f64(double %a0, <2 x double> %a1) {
; SSE-LABEL: test_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v2f64(double %a0, <2 x double> %a1)
ret double %1
}
define double @test_v4f64(double %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v4f64(double %a0, <4 x double> %a1)
ret double %1
}
define double @test_v8f64(double %a0, <8 x double> %a1) {
; SSE-LABEL: test_v8f64:
; SSE: # %bb.0:
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f64:
; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v8f64(double %a0, <8 x double> %a1)
ret double %1
}
define double @test_v16f64(double %a0, <16 x double> %a1) {
; SSE-LABEL: test_v16f64:
; SSE: # %bb.0:
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm7 = xmm7[1,1]
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: addsd %xmm8, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm8 = xmm8[1,1]
; SSE-NEXT: addsd %xmm8, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16f64:
; AVX: # %bb.0:
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm5, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm4[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm4, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm2, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm2, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v16f64(double %a0, <16 x double> %a1)
ret double %1
}
;
; vXf64 (zero)
;
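; f64 variants of the zero-accumulator tests: as with f32, the +0.0 start value is
; materialized (xorpd/vxorpd/vpxor) and added explicitly, since the ordered reduction
; may not drop it without a no-signed-zeros guarantee.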
define double @test_v2f64_zero(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm1, %xmm1
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v2f64_zero:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512BW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v2f64_zero:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512VL-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v2f64(double 0.0, <2 x double> %a0)
ret double %1
}
define double @test_v4f64_zero(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm2, %xmm2
; SSE-NEXT: addsd %xmm0, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v4f64_zero:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512BW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512BW-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512BW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v4f64_zero:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512VL-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512VL-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512VL-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v4f64(double 0.0, <4 x double> %a0)
ret double %1
}
define double @test_v8f64_zero(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm4, %xmm4
; SSE-NEXT: addsd %xmm0, %xmm4
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f64_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v8f64_zero:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512BW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512BW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512BW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512BW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512BW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512BW-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512BW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v8f64_zero:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512VL-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512VL-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512VL-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512VL-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512VL-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512VL-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512VL-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v8f64(double 0.0, <8 x double> %a0)
ret double %1
}
define double @test_v16f64_zero(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm8, %xmm8
; SSE-NEXT: addsd %xmm0, %xmm8
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm8, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm7 = xmm7[1,1]
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16f64_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm4
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm5, %xmm4, %xmm4
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm4
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512BW-LABEL: test_v16f64_zero:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512BW-NEXT: vaddsd %xmm2, %xmm0, %xmm2
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512BW-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX512BW-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512BW-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vextractf32x4 $2, %zmm0, %xmm3
; AVX512BW-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512BW-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512BW-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512BW-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512BW-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX512BW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512BW-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512BW-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512BW-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; AVX512BW-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512BW-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512BW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512BW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512VL-LABEL: test_v16f64_zero:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vaddsd %xmm2, %xmm0, %xmm2
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512VL-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512VL-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX512VL-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512VL-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512VL-NEXT: vextractf32x4 $2, %zmm0, %xmm3
; AVX512VL-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512VL-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512VL-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512VL-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512VL-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512VL-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512VL-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512VL-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; AVX512VL-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512VL-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512VL-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512VL-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v16f64(double 0.0, <16 x double> %a0)
ret double %1
}

;
; vXf64 (undef)
;

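; The tests below pass 'undef' as the scalar start operand of the reduction
; intrinsic, in contrast to the accumulator and 0.0 variants above.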
define double @test_v2f64_undef(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v2f64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v2f64(double undef, <2 x double> %a0)
ret double %1
}

define double @test_v4f64_undef(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd {{.*}}(%rip), %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v4f64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v4f64(double undef, <4 x double> %a0)
ret double %1
}

define double @test_v8f64_undef(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd {{.*}}(%rip), %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v8f64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd {{.*}}(%rip), %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v8f64(double undef, <8 x double> %a0)
ret double %1
}

define double @test_v16f64_undef(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd {{.*}}(%rip), %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm5 = xmm5[1,1]
; SSE-NEXT: addsd %xmm5, %xmm0
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
; SSE-NEXT: addsd %xmm6, %xmm0
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm7 = xmm7[1,1]
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16f64_undef:
; AVX: # %bb.0:
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm4, %xmm4
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm4
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; AVX512-LABEL: test_v16f64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddsd {{.*}}(%rip), %xmm2, %xmm2
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vextractf32x4 $2, %zmm0, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $2, %zmm1, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call double @llvm.experimental.vector.reduce.fadd.f64.f64.v16f64(double undef, <16 x double> %a0)
ret double %1
}

declare float @llvm.experimental.vector.reduce.fadd.f32.f32.v2f32(float, <2 x float>)
declare float @llvm.experimental.vector.reduce.fadd.f32.f32.v4f32(float, <4 x float>)
declare float @llvm.experimental.vector.reduce.fadd.f32.f32.v8f32(float, <8 x float>)
declare float @llvm.experimental.vector.reduce.fadd.f32.f32.v16f32(float, <16 x float>)
declare double @llvm.experimental.vector.reduce.fadd.f64.f64.v2f64(double, <2 x double>)
declare double @llvm.experimental.vector.reduce.fadd.f64.f64.v4f64(double, <4 x double>)
declare double @llvm.experimental.vector.reduce.fadd.f64.f64.v8f64(double, <8 x double>)
declare double @llvm.experimental.vector.reduce.fadd.f64.f64.v16f64(double, <16 x double>)