; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX1-SLOW
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx,+fast-hops | FileCheck %s --check-prefix=AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=AVX512
; RUN: llc < %s -mtriple=x86_64-- -mcpu=x86-64 -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefix=AVX512
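;
; These tests exercise 'fast' llvm.vector.reduce.fadd codegen across SSE2,
; SSE4.1, AVX (with and without +fast-hops), AVX2, and AVX512. Without the
; 'fast' flag, a reduction with a start value is strictly sequential; with
; it, the backend may reassociate into a log2(N) shuffle-and-add tree and,
; on +fast-hops targets, use horizontal adds (haddps/haddpd).
;
; As a sketch (hypothetical IR, not part of the test), the strict v4f32
; expansion that 'fast' lets the backend avoid would be:
;   %t0 = fadd float %acc, %v0
;   %t1 = fadd float %t0, %v1
;   %t2 = fadd float %t1, %v2
;   %t3 = fadd float %t2, %v3
; With 'fast', elements can be paired first, which is what the shuffle +
; addps sequences below implement.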
;
; vXf32 (accum)
;
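; Each test here threads a live scalar accumulator %a0 through the reduction;
; under 'fast' it can be added once at the end, which shows up as the final
; addss/vaddss in every sequence below.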
define float @test_v2f32(float %a0, <2 x float> %a1) {
; SSE2-LABEL: test_v2f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm2
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm2
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v2f32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v2f32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v2f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v2f32(float %a0, <2 x float> %a1)
ret float %1
}
define float @test_v4f32(float %a0, <4 x float> %a1) {
; SSE2-LABEL: test_v4f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: addps %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32:
; SSE41: # %bb.0:
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: addps %xmm1, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v4f32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v4f32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v4f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float %a0, <4 x float> %a1)
ret float %1
}
define float @test_v8f32(float %a0, <8 x float> %a1) {
; SSE2-LABEL: test_v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addps %xmm2, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: addps %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32:
; SSE41: # %bb.0:
; SSE41-NEXT: addps %xmm2, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: addps %xmm1, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v8f32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v8f32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm2, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v8f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float %a0, <8 x float> %a1)
ret float %1
}
define float @test_v16f32(float %a0, <16 x float> %a1) {
; SSE2-LABEL: test_v16f32:
; SSE2: # %bb.0:
; SSE2-NEXT: addps %xmm4, %xmm2
; SSE2-NEXT: addps %xmm3, %xmm1
; SSE2-NEXT: addps %xmm2, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT: addps %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm2[1,1]
; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32:
; SSE41: # %bb.0:
; SSE41-NEXT: addps %xmm4, %xmm2
; SSE41-NEXT: addps %xmm3, %xmm1
; SSE41-NEXT: addps %xmm2, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE41-NEXT: addps %xmm1, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm2[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v16f32:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v16f32:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v16f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vaddps %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; AVX512-NEXT: vaddps %zmm2, %zmm1, %zmm1
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddps %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v16f32(float %a0, <16 x float> %a1)
ret float %1
}
;
; vXf32 (zero)
;
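; A 0.0 start value combined with 'fast' (which implies nsz) folds away, so
; these sequences have no trailing scalar add of the accumulator.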
define float @test_v2f32_zero(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v2f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v2f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v2f32_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f32_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v2f32(float 0.0, <2 x float> %a0)
ret float %1
}
define float @test_v4f32_zero(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v4f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v4f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v4f32_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f32_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.0, <4 x float> %a0)
ret float %1
}
define float @test_v8f32_zero(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v8f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v8f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v8f32_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f32_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float 0.0, <8 x float> %a0)
ret float %1
}
define float @test_v16f32_zero(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: addps %xmm3, %xmm1
; SSE2-NEXT: addps %xmm2, %xmm0
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: addps %xmm3, %xmm1
; SSE41-NEXT: addps %xmm2, %xmm0
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v16f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v16f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v16f32_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f32_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a0)
ret float %1
}
;
; vXf32 (undef)
;
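; Note: despite the _undef names, these call sites also pass a 0.0 start
; value, so the expected codegen matches the 'zero' tests above.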
define float @test_v2f32_undef(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f32_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v2f32_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v2f32_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v2f32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v2f32(float 0.0, <2 x float> %a0)
ret float %1
}
define float @test_v4f32_undef(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v4f32_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v4f32_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v4f32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.0, <4 x float> %a0)
ret float %1
}
define float @test_v8f32_undef(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v8f32_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v8f32_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v8f32_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v8f32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float 0.0, <8 x float> %a0)
ret float %1
}
define float @test_v16f32_undef(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_undef:
; SSE2: # %bb.0:
; SSE2-NEXT: addps %xmm3, %xmm1
; SSE2-NEXT: addps %xmm2, %xmm0
; SSE2-NEXT: addps %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addps %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1],xmm1[1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v16f32_undef:
; SSE41: # %bb.0:
; SSE41-NEXT: addps %xmm3, %xmm1
; SSE41-NEXT: addps %xmm2, %xmm0
; SSE41-NEXT: addps %xmm1, %xmm0
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addps %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movaps %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v16f32_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v16f32_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v16f32_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f32_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a0)
ret float %1
}
;
; vXf64 (accum)
;
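; The f64 reductions mirror the f32 shape with one fewer tree level per
; register: unpckhpd/vpermilpd extracts the high element, addsd/vaddsd
; combines, and +fast-hops targets use vhaddpd.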
define double @test_v2f64(double %a0, <2 x double> %a1) {
; SSE-LABEL: test_v2f64:
; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm2
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v2f64:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v2f64:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v2f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double %a0, <2 x double> %a1)
ret double %1
}
define double @test_v4f64(double %a0, <4 x double> %a1) {
; SSE-LABEL: test_v4f64:
; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm2, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm2
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v4f64:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v4f64:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm2, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v4f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double %a0, <4 x double> %a1)
ret double %1
}
define double @test_v8f64(double %a0, <8 x double> %a1) {
; SSE-LABEL: test_v8f64:
; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm4, %xmm2
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm2, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm2
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v8f64:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v8f64:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v8f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; AVX512-NEXT: vaddpd %zmm2, %zmm1, %zmm1
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v8f64(double %a0, <8 x double> %a1)
ret double %1
}
define double @test_v16f64(double %a0, <16 x double> %a1) {
; SSE-LABEL: test_v16f64:
; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm6, %xmm2
; SSE-NEXT: addpd %xmm7, %xmm3
; SSE-NEXT: addpd %xmm5, %xmm1
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm4
; SSE-NEXT: addpd %xmm2, %xmm4
; SSE-NEXT: addpd %xmm1, %xmm4
; SSE-NEXT: movapd %xmm4, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm4[1]
; SSE-NEXT: addsd %xmm4, %xmm1
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v16f64:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm4, %ymm2, %ymm2
; AVX1-SLOW-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-SLOW-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v16f64:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm4, %ymm2, %ymm2
; AVX1-FAST-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-FAST-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-FAST-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v16f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vaddpd %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddpd %zmm2, %zmm1, %zmm1
; AVX512-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; AVX512-NEXT: vaddpd %zmm2, %zmm1, %zmm1
; AVX512-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX512-NEXT: vaddpd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v16f64(double %a0, <16 x double> %a1)
ret double %1
}
;
; vXf64 (zero)
;
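; As with f32, the 0.0 start value folds away under 'fast' (nsz).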
define double @test_v2f64_zero(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v2f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v2f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v2f64_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f64_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double 0.0, <2 x double> %a0)
ret double %1
}
define double @test_v4f64_zero(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v4f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v4f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v4f64_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f64_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double 0.0, <4 x double> %a0)
ret double %1
}
define double @test_v8f64_zero(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v8f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v8f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v8f64_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f64_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v8f64(double 0.0, <8 x double> %a0)
ret double %1
}
define double @test_v16f64_zero(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm6, %xmm2
; SSE-NEXT: addpd %xmm4, %xmm0
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: addpd %xmm7, %xmm3
; SSE-NEXT: addpd %xmm5, %xmm1
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v16f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v16f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-FAST-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX1-FAST-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v16f64_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f64_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v16f64(double 0.0, <16 x double> %a0)
ret double %1
}
;
; vXf64 (undef)
;
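; As with f32, these _undef variants also pass a 0.0 start value and check
; the same output as the 'zero' group.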
define double @test_v2f64_undef(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v2f64_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v2f64_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v2f64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double 0.0, <2 x double> %a0)
ret double %1
}
define double @test_v4f64_undef(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v4f64_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v4f64_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v4f64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double 0.0, <4 x double> %a0)
ret double %1
}
define double @test_v8f64_undef(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: addpd %xmm1, %xmm0
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v8f64_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v8f64_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v8f64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v8f64(double 0.0, <8 x double> %a0)
ret double %1
}
define double @test_v16f64_undef(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_undef:
; SSE: # %bb.0:
; SSE-NEXT: addpd %xmm6, %xmm2
; SSE-NEXT: addpd %xmm4, %xmm0
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: addpd %xmm7, %xmm3
; SSE-NEXT: addpd %xmm5, %xmm1
; SSE-NEXT: addpd %xmm3, %xmm1
; SSE-NEXT: addpd %xmm0, %xmm1
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-SLOW-LABEL: test_v16f64_undef:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-SLOW-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-SLOW-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v16f64_undef:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX1-FAST-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX1-FAST-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-FAST-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v16f64_undef:
; AVX2: # %bb.0:
; AVX2-NEXT: vaddpd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vaddpd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vaddpd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f64_undef:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call fast double @llvm.vector.reduce.fadd.f64.v16f64(double 0.0, <16 x double> %a0)
ret double %1
}
declare float @llvm.vector.reduce.fadd.f32.v2f32(float, <2 x float>)
declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
declare float @llvm.vector.reduce.fadd.f32.v8f32(float, <8 x float>)
declare float @llvm.vector.reduce.fadd.f32.v16f32(float, <16 x float>)
declare double @llvm.vector.reduce.fadd.f64.v2f64(double, <2 x double>)
declare double @llvm.vector.reduce.fadd.f64.v4f64(double, <4 x double>)
declare double @llvm.vector.reduce.fadd.f64.v8f64(double, <8 x double>)
declare double @llvm.vector.reduce.fadd.f64.v16f64(double, <16 x double>)