[CodeGen] Fix neutral value of vecreduce fadd in tests (NFC)

The neutral value is -0.0, not 0.0. This doesn't matter for "fast"
reductions due to nsz, but does matter for reassoc-only and seq
reductions.

Change tests to mostly use -0.0 where the neutral value was intended, and
add additional test coverage in some places. Also update LangRef to use
the correct value.
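
For context, a minimal IR sketch of why the distinction matters
(hypothetical functions, not part of the diff): IEEE-754 gives
x + -0.0 == x for every x, but -0.0 + +0.0 == +0.0, so only -0.0 is a
true identity once signed zeros are observable (i.e. without nsz):

  declare float @llvm.vector.reduce.fadd.f32.v2f32(float, <2 x float>)

  ; Sequential reduction: computes ((start + v0) + v1) in order. A -0.0
  ; start value is a no-op; a +0.0 start would change the result for an
  ; input of all -0.0 elements from -0.0 to +0.0.
  define float @seq_reduce(<2 x float> %v) {
    %r = call float @llvm.vector.reduce.fadd.f32.v2f32(float -0.0, <2 x float> %v)
    ret float %r
  }

  ; reassoc-only reduction: the additions may be reassociated, but nsz is
  ; not implied, so the start value still has to be -0.0 to be ignorable.
  ; With "fast" (which implies nsz) either sign of zero works.
  define float @relaxed_reduce(<2 x float> %v) {
    %r = call reassoc float @llvm.vector.reduce.fadd.f32.v2f32(float -0.0, <2 x float> %v)
    ret float %r
  }
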
Nikita Popov, 2020-10-29 20:34:13 +01:00
commit fa48ff3fc9 (parent b22f111023)
7 changed files with 963 additions and 371 deletions

@ -15680,12 +15680,15 @@ The first argument to this intrinsic is a scalar start value for the reduction.
The type of the start value matches the element-type of the vector input.
The second argument must be a vector of floating-point values.
To ignore the start value, negative zero (``-0.0``) can be used, as it is
the neutral value of floating point addition.
Examples:
"""""""""
::
%unord = call reassoc float @llvm.vector.reduce.fadd.v4f32(float 0.0, <4 x float> %input) ; relaxed reduction
%unord = call reassoc float @llvm.vector.reduce.fadd.v4f32(float -0.0, <4 x float> %input) ; relaxed reduction
%ord = call float @llvm.vector.reduce.fadd.v4f32(float %start_value, <4 x float> %input) ; sequential reduction
@ -15751,6 +15754,9 @@ The first argument to this intrinsic is a scalar start value for the reduction.
The type of the start value matches the element-type of the vector input.
The second argument must be a vector of floating-point values.
To ignore the start value, one (``1.0``) can be used, as it is the neutral
value of floating point multiplication.
Examples:
"""""""""

@ -9,76 +9,145 @@ declare double @llvm.vector.reduce.fadd.f64.v1f64(double, <1 x double>)
declare fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128, <1 x fp128>)
declare float @llvm.vector.reduce.fadd.f32.v3f32(float, <3 x float>)
declare float @llvm.vector.reduce.fadd.f32.v5f32(float, <5 x float>)
declare fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128, <2 x fp128>)
declare float @llvm.vector.reduce.fadd.f32.v16f32(float, <16 x float>)
define half @test_v1f16(<1 x half> %a) nounwind {
define half @test_v1f16(<1 x half> %a, half %s) nounwind {
; CHECK-LABEL: test_v1f16:
; CHECK: // %bb.0:
; CHECK-NEXT: fcvt s0, h0
; CHECK-NEXT: fmov s1, wzr
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: fcvt s1, h1
; CHECK-NEXT: fadd s0, s1, s0
; CHECK-NEXT: fcvt h0, s0
; CHECK-NEXT: ret
%b = call half @llvm.vector.reduce.fadd.f16.v1f16(half 0.0, <1 x half> %a)
%b = call half @llvm.vector.reduce.fadd.f16.v1f16(half %s, <1 x half> %a)
ret half %b
}
define float @test_v1f32(<1 x float> %a) nounwind {
define half @test_v1f16_neutral(<1 x half> %a) nounwind {
; CHECK-LABEL: test_v1f16_neutral:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%b = call half @llvm.vector.reduce.fadd.f16.v1f16(half -0.0, <1 x half> %a)
ret half %b
}
define float @test_v1f32(<1 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v1f32:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: fmov s1, wzr
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: fadd s0, s1, s0
; CHECK-NEXT: ret
%b = call float @llvm.vector.reduce.fadd.f32.v1f32(float 0.0, <1 x float> %a)
%b = call float @llvm.vector.reduce.fadd.f32.v1f32(float %s, <1 x float> %a)
ret float %b
}
define double @test_v1f64(<1 x double> %a) nounwind {
define float @test_v1f32_neutral(<1 x float> %a) nounwind {
; CHECK-LABEL: test_v1f32_neutral:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: ret
%b = call float @llvm.vector.reduce.fadd.f32.v1f32(float -0.0, <1 x float> %a)
ret float %b
}
define double @test_v1f64(<1 x double> %a, double %s) nounwind {
; CHECK-LABEL: test_v1f64:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov d1, xzr
; CHECK-NEXT: fadd d0, d0, d1
; CHECK-NEXT: fadd d0, d1, d0
; CHECK-NEXT: ret
%b = call double @llvm.vector.reduce.fadd.f64.v1f64(double 0.0, <1 x double> %a)
%b = call double @llvm.vector.reduce.fadd.f64.v1f64(double %s, <1 x double> %a)
ret double %b
}
define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
define double @test_v1f64_neutral(<1 x double> %a) nounwind {
; CHECK-LABEL: test_v1f64_neutral:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%b = call double @llvm.vector.reduce.fadd.f64.v1f64(double -0.0, <1 x double> %a)
ret double %b
}
define fp128 @test_v1f128(<1 x fp128> %a, fp128 %s) nounwind {
; CHECK-LABEL: test_v1f128:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: adrp x8, .LCPI3_0
; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI3_0]
; CHECK-NEXT: mov v2.16b, v0.16b
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: mov v1.16b, v2.16b
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%b = call fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
%b = call fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 %s, <1 x fp128> %a)
ret fp128 %b
}
define float @test_v3f32(<3 x float> %a) nounwind {
define fp128 @test_v1f128_neutral(<1 x fp128> %a) nounwind {
; CHECK-LABEL: test_v1f128_neutral:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%b = call fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 0xL00000000000000008000000000000000, <1 x fp128> %a)
ret fp128 %b
}
define float @test_v3f32(<3 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v3f32:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov s1, wzr
; CHECK-NEXT: fadd s1, s1, s0
; CHECK-NEXT: mov s2, v0.s[1]
; CHECK-NEXT: fadd s1, s0, s1
; CHECK-NEXT: fadd s1, s1, s2
; CHECK-NEXT: mov s0, v0.s[2]
; CHECK-NEXT: fadd s0, s1, s0
; CHECK-NEXT: ret
%b = call float @llvm.vector.reduce.fadd.f32.v3f32(float 0.0, <3 x float> %a)
%b = call float @llvm.vector.reduce.fadd.f32.v3f32(float %s, <3 x float> %a)
ret float %b
}
define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
define float @test_v3f32_neutral(<3 x float> %a) nounwind {
; CHECK-LABEL: test_v3f32_neutral:
; CHECK: // %bb.0:
; CHECK-NEXT: faddp s1, v0.2s
; CHECK-NEXT: mov s0, v0.s[2]
; CHECK-NEXT: fadd s0, s1, s0
; CHECK-NEXT: ret
%b = call float @llvm.vector.reduce.fadd.f32.v3f32(float -0.0, <3 x float> %a)
ret float %b
}
define float @test_v5f32(<5 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v5f32:
; CHECK: // %bb.0:
; CHECK-NEXT: fadd s0, s5, s0
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: fadd s0, s0, s2
; CHECK-NEXT: fadd s0, s0, s3
; CHECK-NEXT: fadd s0, s0, s4
; CHECK-NEXT: ret
%b = call float @llvm.vector.reduce.fadd.f32.v5f32(float %s, <5 x float> %a)
ret float %b
}
define float @test_v5f32_neutral(<5 x float> %a) nounwind {
; CHECK-LABEL: test_v5f32_neutral:
; CHECK: // %bb.0:
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: fadd s0, s0, s2
; CHECK-NEXT: fadd s0, s0, s3
; CHECK-NEXT: fadd s0, s0, s4
; CHECK-NEXT: ret
%b = call float @llvm.vector.reduce.fadd.f32.v5f32(float -0.0, <5 x float> %a)
ret float %b
}
define fp128 @test_v2f128(<2 x fp128> %a, fp128 %s) nounwind {
; CHECK-LABEL: test_v2f128:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #32 // =32
; CHECK-NEXT: adrp x8, .LCPI5_0
; CHECK-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI5_0]
; CHECK-NEXT: mov v1.16b, v0.16b
; CHECK-NEXT: mov v0.16b, v2.16b
; CHECK-NEXT: str x30, [sp, #16] // 8-byte Folded Spill
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
@ -86,16 +155,26 @@ define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
; CHECK-NEXT: add sp, sp, #32 // =32
; CHECK-NEXT: ret
%b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
%b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 %s, <2 x fp128> %a)
ret fp128 %b
}
define float @test_v16f32(<16 x float> %a) nounwind {
define fp128 @test_v2f128_neutral(<2 x fp128> %a) nounwind {
; CHECK-LABEL: test_v2f128_neutral:
; CHECK: // %bb.0:
; CHECK-NEXT: str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 0xL00000000000000008000000000000000, <2 x fp128> %a)
ret fp128 %b
}
define float @test_v16f32(<16 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v16f32:
; CHECK: // %bb.0:
; CHECK-NEXT: fmov s4, wzr
; CHECK-NEXT: fadd s4, s4, s0
; CHECK-NEXT: mov s5, v0.s[1]
; CHECK-NEXT: fadd s4, s0, s4
; CHECK-NEXT: fadd s4, s4, s5
; CHECK-NEXT: mov s5, v0.s[2]
; CHECK-NEXT: mov s0, v0.s[3]
@ -123,6 +202,40 @@ define float @test_v16f32(<16 x float> %a) nounwind {
; CHECK-NEXT: mov s1, v3.s[3]
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: ret
%b = call float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a)
%b = call float @llvm.vector.reduce.fadd.f32.v16f32(float %s, <16 x float> %a)
ret float %b
}
define float @test_v16f32_neutral(<16 x float> %a) nounwind {
; CHECK-LABEL: test_v16f32_neutral:
; CHECK: // %bb.0:
; CHECK-NEXT: faddp s4, v0.2s
; CHECK-NEXT: mov s5, v0.s[2]
; CHECK-NEXT: mov s0, v0.s[3]
; CHECK-NEXT: fadd s4, s4, s5
; CHECK-NEXT: fadd s0, s4, s0
; CHECK-NEXT: mov s5, v1.s[1]
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: mov s4, v1.s[2]
; CHECK-NEXT: fadd s0, s0, s5
; CHECK-NEXT: mov s1, v1.s[3]
; CHECK-NEXT: fadd s0, s0, s4
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: mov s5, v2.s[1]
; CHECK-NEXT: fadd s0, s0, s2
; CHECK-NEXT: mov s4, v2.s[2]
; CHECK-NEXT: fadd s0, s0, s5
; CHECK-NEXT: mov s1, v2.s[3]
; CHECK-NEXT: fadd s0, s0, s4
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: mov s2, v3.s[1]
; CHECK-NEXT: fadd s0, s0, s3
; CHECK-NEXT: mov s5, v3.s[2]
; CHECK-NEXT: fadd s0, s0, s2
; CHECK-NEXT: fadd s0, s0, s5
; CHECK-NEXT: mov s1, v3.s[3]
; CHECK-NEXT: fadd s0, s0, s1
; CHECK-NEXT: ret
%b = call float @llvm.vector.reduce.fadd.f32.v16f32(float -0.0, <16 x float> %a)
ret float %b
}

@ -7,6 +7,7 @@ declare double @llvm.vector.reduce.fadd.f64.v1f64(double, <1 x double>)
declare fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128, <1 x fp128>)
declare float @llvm.vector.reduce.fadd.f32.v3f32(float, <3 x float>)
declare float @llvm.vector.reduce.fadd.f32.v5f32(float, <5 x float>)
declare fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128, <2 x fp128>)
declare float @llvm.vector.reduce.fadd.f32.v16f32(float, <16 x float>)
@ -14,7 +15,7 @@ define half @test_v1f16(<1 x half> %a) nounwind {
; CHECK-LABEL: test_v1f16:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%b = call fast nnan half @llvm.vector.reduce.fadd.f16.v1f16(half 0.0, <1 x half> %a)
%b = call reassoc half @llvm.vector.reduce.fadd.f16.v1f16(half -0.0, <1 x half> %a)
ret half %b
}
@ -24,7 +25,7 @@ define float @test_v1f32(<1 x float> %a) nounwind {
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: // kill: def $s0 killed $s0 killed $q0
; CHECK-NEXT: ret
%b = call fast nnan float @llvm.vector.reduce.fadd.f32.v1f32(float 0.0, <1 x float> %a)
%b = call reassoc float @llvm.vector.reduce.fadd.f32.v1f32(float -0.0, <1 x float> %a)
ret float %b
}
@ -32,7 +33,7 @@ define double @test_v1f64(<1 x double> %a) nounwind {
; CHECK-LABEL: test_v1f64:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%b = call fast nnan double @llvm.vector.reduce.fadd.f64.v1f64(double 0.0, <1 x double> %a)
%b = call reassoc double @llvm.vector.reduce.fadd.f64.v1f64(double -0.0, <1 x double> %a)
ret double %b
}
@ -40,7 +41,7 @@ define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
; CHECK-LABEL: test_v1f128:
; CHECK: // %bb.0:
; CHECK-NEXT: ret
%b = call fast nnan fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
%b = call reassoc fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 0xL00000000000000008000000000000000, <1 x fp128> %a)
ret fp128 %b
}
@ -52,7 +53,29 @@ define float @test_v3f32(<3 x float> %a) nounwind {
; CHECK-NEXT: fadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT: faddp s0, v0.2s
; CHECK-NEXT: ret
%b = call fast nnan float @llvm.vector.reduce.fadd.f32.v3f32(float 0.0, <3 x float> %a)
%b = call reassoc float @llvm.vector.reduce.fadd.f32.v3f32(float -0.0, <3 x float> %a)
ret float %b
}
define float @test_v5f32(<5 x float> %a) nounwind {
; CHECK-LABEL: test_v5f32:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: // kill: def $s1 killed $s1 def $q1
; CHECK-NEXT: // kill: def $s2 killed $s2 def $q2
; CHECK-NEXT: movi v5.2d, #0000000000000000
; CHECK-NEXT: mov v0.s[1], v1.s[0]
; CHECK-NEXT: mov v0.s[2], v2.s[0]
; CHECK-NEXT: // kill: def $s4 killed $s4 def $q4
; CHECK-NEXT: // kill: def $s3 killed $s3 def $q3
; CHECK-NEXT: mov v0.s[3], v3.s[0]
; CHECK-NEXT: mov v5.s[0], v4.s[0]
; CHECK-NEXT: fadd v0.4s, v0.4s, v5.4s
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
; CHECK-NEXT: fadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT: faddp s0, v0.2s
; CHECK-NEXT: ret
%b = call reassoc float @llvm.vector.reduce.fadd.f32.v5f32(float -0.0, <5 x float> %a)
ret float %b
}
@ -63,7 +86,7 @@ define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
%b = call fast nnan fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
%b = call reassoc fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 0xL00000000000000008000000000000000, <2 x fp128> %a)
ret fp128 %b
}
@ -77,6 +100,6 @@ define float @test_v16f32(<16 x float> %a) nounwind {
; CHECK-NEXT: fadd v0.2s, v0.2s, v1.2s
; CHECK-NEXT: faddp s0, v0.2s
; CHECK-NEXT: ret
%b = call fast nnan float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a)
%b = call reassoc float @llvm.vector.reduce.fadd.f32.v16f32(float -0.0, <16 x float> %a)
ret float %b
}

@ -14,7 +14,7 @@ define float @add_HalfS(<2 x float> %bin.rdx) {
; CHECKNOFP16-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECKNOFP16-NEXT: faddp s0, v0.2s
; CHECKNOFP16-NEXT: ret
%r = call fast float @llvm.vector.reduce.fadd.f32.v2f32(float 0.0, <2 x float> %bin.rdx)
%r = call fast float @llvm.vector.reduce.fadd.f32.v2f32(float -0.0, <2 x float> %bin.rdx)
ret float %r
}
@ -48,7 +48,7 @@ define half @add_HalfH(<4 x half> %bin.rdx) {
; CHECKNOFP16-NEXT: fadd s0, s0, s1
; CHECKNOFP16-NEXT: fcvt h0, s0
; CHECKNOFP16-NEXT: ret
%r = call fast half @llvm.vector.reduce.fadd.f16.v4f16(half 0.0, <4 x half> %bin.rdx)
%r = call fast half @llvm.vector.reduce.fadd.f16.v4f16(half -0.0, <4 x half> %bin.rdx)
ret half %r
}
@ -103,7 +103,7 @@ define half @add_H(<8 x half> %bin.rdx) {
; CHECKNOFP16-NEXT: fadd s0, s0, s1
; CHECKNOFP16-NEXT: fcvt h0, s0
; CHECKNOFP16-NEXT: ret
%r = call fast half @llvm.vector.reduce.fadd.f16.v8f16(half 0.0, <8 x half> %bin.rdx)
%r = call fast half @llvm.vector.reduce.fadd.f16.v8f16(half -0.0, <8 x half> %bin.rdx)
ret half %r
}
@ -121,7 +121,7 @@ define float @add_S(<4 x float> %bin.rdx) {
; CHECKNOFP16-NEXT: fadd v0.2s, v0.2s, v1.2s
; CHECKNOFP16-NEXT: faddp s0, v0.2s
; CHECKNOFP16-NEXT: ret
%r = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.0, <4 x float> %bin.rdx)
%r = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %bin.rdx)
ret float %r
}
@ -135,7 +135,7 @@ define double @add_D(<2 x double> %bin.rdx) {
; CHECKNOFP16: // %bb.0:
; CHECKNOFP16-NEXT: faddp d0, v0.2d
; CHECKNOFP16-NEXT: ret
%r = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double 0.0, <2 x double> %bin.rdx)
%r = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %bin.rdx)
ret double %r
}
@ -229,7 +229,7 @@ define half @add_2H(<16 x half> %bin.rdx) {
; CHECKNOFP16-NEXT: fadd s0, s1, s0
; CHECKNOFP16-NEXT: fcvt h0, s0
; CHECKNOFP16-NEXT: ret
%r = call fast half @llvm.vector.reduce.fadd.f16.v16f16(half 0.0, <16 x half> %bin.rdx)
%r = call fast half @llvm.vector.reduce.fadd.f16.v16f16(half -0.0, <16 x half> %bin.rdx)
ret half %r
}
@ -249,7 +249,7 @@ define float @add_2S(<8 x float> %bin.rdx) {
; CHECKNOFP16-NEXT: fadd v0.2s, v0.2s, v1.2s
; CHECKNOFP16-NEXT: faddp s0, v0.2s
; CHECKNOFP16-NEXT: ret
%r = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float 0.0, <8 x float> %bin.rdx)
%r = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float -0.0, <8 x float> %bin.rdx)
ret float %r
}
@ -265,7 +265,7 @@ define double @add_2D(<4 x double> %bin.rdx) {
; CHECKNOFP16-NEXT: fadd v0.2d, v0.2d, v1.2d
; CHECKNOFP16-NEXT: faddp d0, v0.2d
; CHECKNOFP16-NEXT: ret
%r = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double 0.0, <4 x double> %bin.rdx)
%r = call fast double @llvm.vector.reduce.fadd.f64.v4f64(double -0.0, <4 x double> %bin.rdx)
ret double %r
}

@ -6,8 +6,8 @@ declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
declare double @llvm.vector.reduce.fadd.f64.v2f64(double, <2 x double>)
declare fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128, <2 x fp128>)
define half @test_v4f16(<4 x half> %a) nounwind {
; CHECK-LABEL: test_v4f16:
define half @test_v4f16_reassoc(<4 x half> %a) nounwind {
; CHECK-LABEL: test_v4f16_reassoc:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT: push {r4, r5, r6, r7, r8, lr}
@ -37,12 +37,47 @@ define half @test_v4f16(<4 x half> %a) nounwind {
; CHECK-NEXT: bl __aeabi_f2h
; CHECK-NEXT: pop {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT: mov pc, lr
%b = call fast half @llvm.vector.reduce.fadd.f16.v4f16(half 0.0, <4 x half> %a)
%b = call reassoc half @llvm.vector.reduce.fadd.f16.v4f16(half -0.0, <4 x half> %a)
ret half %b
}
define float @test_v4f32(<4 x float> %a) nounwind {
; CHECK-LABEL: test_v4f32:
define half @test_v4f16_seq(<4 x half> %a) nounwind {
; CHECK-LABEL: test_v4f16_seq:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT: push {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT: mov r4, #255
; CHECK-NEXT: mov r7, r0
; CHECK-NEXT: orr r4, r4, #65280
; CHECK-NEXT: mov r5, r2
; CHECK-NEXT: and r0, r3, r4
; CHECK-NEXT: mov r6, r1
; CHECK-NEXT: bl __aeabi_h2f
; CHECK-NEXT: mov r8, r0
; CHECK-NEXT: and r0, r5, r4
; CHECK-NEXT: bl __aeabi_h2f
; CHECK-NEXT: mov r5, r0
; CHECK-NEXT: and r0, r7, r4
; CHECK-NEXT: bl __aeabi_h2f
; CHECK-NEXT: mov r7, r0
; CHECK-NEXT: and r0, r6, r4
; CHECK-NEXT: bl __aeabi_h2f
; CHECK-NEXT: mov r1, r0
; CHECK-NEXT: mov r0, r7
; CHECK-NEXT: bl __aeabi_fadd
; CHECK-NEXT: mov r1, r5
; CHECK-NEXT: bl __aeabi_fadd
; CHECK-NEXT: mov r1, r8
; CHECK-NEXT: bl __aeabi_fadd
; CHECK-NEXT: bl __aeabi_f2h
; CHECK-NEXT: pop {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT: mov pc, lr
%b = call half @llvm.vector.reduce.fadd.f16.v4f16(half -0.0, <4 x half> %a)
ret half %b
}
define float @test_v4f32_reassoc(<4 x float> %a) nounwind {
; CHECK-LABEL: test_v4f32_reassoc:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r11, lr}
; CHECK-NEXT: push {r4, r5, r11, lr}
@ -55,24 +90,54 @@ define float @test_v4f32(<4 x float> %a) nounwind {
; CHECK-NEXT: bl __aeabi_fadd
; CHECK-NEXT: pop {r4, r5, r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float 0.0, <4 x float> %a)
%b = call reassoc float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a)
ret float %b
}
define double @test_v2f64(<2 x double> %a) nounwind {
; CHECK-LABEL: test_v2f64:
define float @test_v4f32_seq(<4 x float> %a) nounwind {
; CHECK-LABEL: test_v4f32_seq:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r11, lr}
; CHECK-NEXT: push {r4, r5, r11, lr}
; CHECK-NEXT: mov r4, r3
; CHECK-NEXT: mov r5, r2
; CHECK-NEXT: bl __aeabi_fadd
; CHECK-NEXT: mov r1, r5
; CHECK-NEXT: bl __aeabi_fadd
; CHECK-NEXT: mov r1, r4
; CHECK-NEXT: bl __aeabi_fadd
; CHECK-NEXT: pop {r4, r5, r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a)
ret float %b
}
define double @test_v2f64_reassoc(<2 x double> %a) nounwind {
; CHECK-LABEL: test_v2f64_reassoc:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r11, lr}
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: bl __aeabi_dadd
; CHECK-NEXT: pop {r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call fast double @llvm.vector.reduce.fadd.f64.v2f64(double zeroinitializer, <2 x double> %a)
%b = call reassoc double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %a)
ret double %b
}
define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
; CHECK-LABEL: test_v2f128:
define double @test_v2f64_seq(<2 x double> %a) nounwind {
; CHECK-LABEL: test_v2f64_seq:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r11, lr}
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: bl __aeabi_dadd
; CHECK-NEXT: pop {r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %a)
ret double %b
}
define fp128 @test_v2f128_reassoc(<2 x fp128> %a) nounwind {
; CHECK-LABEL: test_v2f128_reassoc:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r11, lr}
; CHECK-NEXT: push {r11, lr}
@ -90,6 +155,29 @@ define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: pop {r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call fast fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
%b = call reassoc fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 0xL00000000000000008000000000000000, <2 x fp128> %a)
ret fp128 %b
}
define fp128 @test_v2f128_seq(<2 x fp128> %a) nounwind {
; CHECK-LABEL: test_v2f128_seq:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r11, lr}
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ldr r12, [sp, #36]
; CHECK-NEXT: str r12, [sp, #12]
; CHECK-NEXT: ldr r12, [sp, #32]
; CHECK-NEXT: str r12, [sp, #8]
; CHECK-NEXT: ldr r12, [sp, #28]
; CHECK-NEXT: str r12, [sp, #4]
; CHECK-NEXT: ldr r12, [sp, #24]
; CHECK-NEXT: str r12, [sp]
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: pop {r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 0xL00000000000000008000000000000000, <2 x fp128> %a)
ret fp128 %b
}

@ -7,111 +7,228 @@ declare double @llvm.vector.reduce.fadd.f64.v1f64(double, <1 x double>)
declare fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128, <1 x fp128>)
declare float @llvm.vector.reduce.fadd.f32.v3f32(float, <3 x float>)
declare float @llvm.vector.reduce.fadd.f32.v5f32(float, <5 x float>)
declare fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128, <2 x fp128>)
declare float @llvm.vector.reduce.fadd.f32.v16f32(float, <16 x float>)
define half @test_v1f16(<1 x half> %a) nounwind {
define half @test_v1f16(<1 x half> %a, half %s) nounwind {
; CHECK-LABEL: test_v1f16:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r11, lr}
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: bl __aeabi_f2h
; CHECK-NEXT: .save {r4, r5, r11, lr}
; CHECK-NEXT: push {r4, r5, r11, lr}
; CHECK-NEXT: .vsave {d8}
; CHECK-NEXT: vpush {d8}
; CHECK-NEXT: mov r4, r0
; CHECK-NEXT: mov r0, r1
; CHECK-NEXT: bl __aeabi_h2f
; CHECK-NEXT: vldr s0, .LCPI0_0
; CHECK-NEXT: vmov s2, r0
; CHECK-NEXT: vadd.f32 s0, s2, s0
; CHECK-NEXT: mov r5, r0
; CHECK-NEXT: mov r0, r4
; CHECK-NEXT: bl __aeabi_f2h
; CHECK-NEXT: vmov s16, r5
; CHECK-NEXT: bl __aeabi_h2f
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vadd.f32 s0, s16, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: bl __aeabi_f2h
; CHECK-NEXT: pop {r11, lr}
; CHECK-NEXT: vpop {d8}
; CHECK-NEXT: pop {r4, r5, r11, lr}
; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI0_0:
; CHECK-NEXT: .long 0x00000000 @ float 0
%b = call half @llvm.vector.reduce.fadd.f16.v1f16(half 0.0, <1 x half> %a)
%b = call half @llvm.vector.reduce.fadd.f16.v1f16(half %s, <1 x half> %a)
ret half %b
}
define float @test_v1f32(<1 x float> %a) nounwind {
; CHECK-LABEL: test_v1f32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr s0, .LCPI1_0
; CHECK-NEXT: vmov s2, r0
; CHECK-NEXT: vadd.f32 s0, s2, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI1_0:
; CHECK-NEXT: .long 0x00000000 @ float 0
%b = call float @llvm.vector.reduce.fadd.f32.v1f32(float 0.0, <1 x float> %a)
ret float %b
}
define double @test_v1f64(<1 x double> %a) nounwind {
; CHECK-LABEL: test_v1f64:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov.i32 d16, #0x0
; CHECK-NEXT: vmov d17, r0, r1
; CHECK-NEXT: vadd.f64 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%b = call double @llvm.vector.reduce.fadd.f64.v1f64(double 0.0, <1 x double> %a)
ret double %b
}
define fp128 @test_v1f128(<1 x fp128> %a) nounwind {
; CHECK-LABEL: test_v1f128:
define half @test_v1f16_neutral(<1 x half> %a) nounwind {
; CHECK-LABEL: test_v1f16_neutral:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r11, lr}
; CHECK-NEXT: push {r11, lr}
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: mov r12, #0
; CHECK-NEXT: str r12, [sp]
; CHECK-NEXT: str r12, [sp, #4]
; CHECK-NEXT: str r12, [sp, #8]
; CHECK-NEXT: str r12, [sp, #12]
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: bl __aeabi_f2h
; CHECK-NEXT: mov r1, #255
; CHECK-NEXT: orr r1, r1, #65280
; CHECK-NEXT: and r0, r0, r1
; CHECK-NEXT: pop {r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 zeroinitializer, <1 x fp128> %a)
%b = call half @llvm.vector.reduce.fadd.f16.v1f16(half -0.0, <1 x half> %a)
ret half %b
}
define float @test_v1f32(<1 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v1f32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov s0, r0
; CHECK-NEXT: vmov s2, r1
; CHECK-NEXT: vadd.f32 s0, s2, s0
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
%b = call float @llvm.vector.reduce.fadd.f32.v1f32(float %s, <1 x float> %a)
ret float %b
}
define float @test_v1f32_neutral(<1 x float> %a) nounwind {
; CHECK-LABEL: test_v1f32_neutral:
; CHECK: @ %bb.0:
; CHECK-NEXT: mov pc, lr
%b = call float @llvm.vector.reduce.fadd.f32.v1f32(float -0.0, <1 x float> %a)
ret float %b
}
define double @test_v1f64(<1 x double> %a, double %s) nounwind {
; CHECK-LABEL: test_v1f64:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d16, r0, r1
; CHECK-NEXT: vmov d17, r2, r3
; CHECK-NEXT: vadd.f64 d16, d17, d16
; CHECK-NEXT: vmov r0, r1, d16
; CHECK-NEXT: mov pc, lr
%b = call double @llvm.vector.reduce.fadd.f64.v1f64(double %s, <1 x double> %a)
ret double %b
}
define double @test_v1f64_neutral(<1 x double> %a) nounwind {
; CHECK-LABEL: test_v1f64_neutral:
; CHECK: @ %bb.0:
; CHECK-NEXT: mov pc, lr
%b = call double @llvm.vector.reduce.fadd.f64.v1f64(double -0.0, <1 x double> %a)
ret double %b
}
define fp128 @test_v1f128(<1 x fp128> %a, fp128 %s) nounwind {
; CHECK-LABEL: test_v1f128:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r11, lr}
; CHECK-NEXT: push {r4, r5, r11, lr}
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ldr r12, [sp, #32]
; CHECK-NEXT: ldr lr, [sp, #36]
; CHECK-NEXT: ldr r4, [sp, #40]
; CHECK-NEXT: ldr r5, [sp, #44]
; CHECK-NEXT: stm sp, {r0, r1, r2, r3}
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: mov r1, lr
; CHECK-NEXT: mov r2, r4
; CHECK-NEXT: mov r3, r5
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: pop {r4, r5, r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 %s, <1 x fp128> %a)
ret fp128 %b
}
define float @test_v3f32(<3 x float> %a) nounwind {
define fp128 @test_v1f128_neutral(<1 x fp128> %a) nounwind {
; CHECK-LABEL: test_v1f128_neutral:
; CHECK: @ %bb.0:
; CHECK-NEXT: mov pc, lr
%b = call fp128 @llvm.vector.reduce.fadd.f128.v1f128(fp128 0xL00000000000000008000000000000000, <1 x fp128> %a)
ret fp128 %b
}
define float @test_v3f32(<3 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v3f32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d3, r2, r3
; CHECK-NEXT: vldr s0, .LCPI4_0
; CHECK-NEXT: vldr s0, [sp]
; CHECK-NEXT: vmov d2, r0, r1
; CHECK-NEXT: vadd.f32 s0, s4, s0
; CHECK-NEXT: vadd.f32 s0, s0, s4
; CHECK-NEXT: vadd.f32 s0, s0, s5
; CHECK-NEXT: vadd.f32 s0, s0, s6
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI4_0:
; CHECK-NEXT: .long 0x00000000 @ float 0
%b = call float @llvm.vector.reduce.fadd.f32.v3f32(float 0.0, <3 x float> %a)
%b = call float @llvm.vector.reduce.fadd.f32.v3f32(float %s, <3 x float> %a)
ret float %b
}
define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
define float @test_v3f32_neutral(<3 x float> %a) nounwind {
; CHECK-LABEL: test_v3f32_neutral:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d1, r2, r3
; CHECK-NEXT: vmov d0, r0, r1
; CHECK-NEXT: vadd.f32 s4, s0, s1
; CHECK-NEXT: vadd.f32 s0, s4, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
%b = call float @llvm.vector.reduce.fadd.f32.v3f32(float -0.0, <3 x float> %a)
ret float %b
}
define float @test_v5f32(<5 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v5f32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr s0, [sp, #4]
; CHECK-NEXT: vmov s2, r0
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vmov s2, r1
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vmov s2, r2
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vmov s2, r3
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vldr s2, [sp]
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
%b = call float @llvm.vector.reduce.fadd.f32.v5f32(float %s, <5 x float> %a)
ret float %b
}
define float @test_v5f32_neutral(<5 x float> %a) nounwind {
; CHECK-LABEL: test_v5f32_neutral:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov s0, r1
; CHECK-NEXT: vmov s2, r0
; CHECK-NEXT: vadd.f32 s0, s2, s0
; CHECK-NEXT: vmov s2, r2
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vmov s2, r3
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vldr s2, [sp]
; CHECK-NEXT: vadd.f32 s0, s0, s2
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
%b = call float @llvm.vector.reduce.fadd.f32.v5f32(float -0.0, <5 x float> %a)
ret float %b
}
define fp128 @test_v2f128(<2 x fp128> %a, fp128 %s) nounwind {
; CHECK-LABEL: test_v2f128:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r11, lr}
; CHECK-NEXT: push {r4, r5, r11, lr}
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: mov r12, #0
; CHECK-NEXT: str r12, [sp]
; CHECK-NEXT: str r12, [sp, #4]
; CHECK-NEXT: str r12, [sp, #8]
; CHECK-NEXT: str r12, [sp, #12]
; CHECK-NEXT: ldr r12, [sp, #48]
; CHECK-NEXT: ldr lr, [sp, #52]
; CHECK-NEXT: ldr r4, [sp, #56]
; CHECK-NEXT: ldr r5, [sp, #60]
; CHECK-NEXT: stm sp, {r0, r1, r2, r3}
; CHECK-NEXT: mov r0, r12
; CHECK-NEXT: mov r1, lr
; CHECK-NEXT: mov r2, r4
; CHECK-NEXT: mov r3, r5
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: ldr r4, [sp, #32]
; CHECK-NEXT: ldr r5, [sp, #40]
; CHECK-NEXT: ldr lr, [sp, #44]
; CHECK-NEXT: ldr r12, [sp, #36]
; CHECK-NEXT: stm sp, {r4, r12}
; CHECK-NEXT: str r5, [sp, #8]
; CHECK-NEXT: str lr, [sp, #12]
; CHECK-NEXT: bl __addtf3
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: pop {r4, r5, r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 %s, <2 x fp128> %a)
ret fp128 %b
}
define fp128 @test_v2f128_neutral(<2 x fp128> %a) nounwind {
; CHECK-LABEL: test_v2f128_neutral:
; CHECK: @ %bb.0:
; CHECK-NEXT: .save {r4, r5, r11, lr}
; CHECK-NEXT: push {r4, r5, r11, lr}
; CHECK-NEXT: .pad #16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ldr r12, [sp, #36]
; CHECK-NEXT: ldr lr, [sp, #32]
; CHECK-NEXT: ldr r4, [sp, #40]
@ -124,18 +241,18 @@ define fp128 @test_v2f128(<2 x fp128> %a) nounwind {
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: pop {r4, r5, r11, lr}
; CHECK-NEXT: mov pc, lr
%b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 zeroinitializer, <2 x fp128> %a)
%b = call fp128 @llvm.vector.reduce.fadd.f128.v2f128(fp128 0xL00000000000000008000000000000000, <2 x fp128> %a)
ret fp128 %b
}
define float @test_v16f32(<16 x float> %a) nounwind {
define float @test_v16f32(<16 x float> %a, float %s) nounwind {
; CHECK-LABEL: test_v16f32:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d3, r2, r3
; CHECK-NEXT: vldr s0, .LCPI6_0
; CHECK-NEXT: vldr s0, [sp, #48]
; CHECK-NEXT: vmov d2, r0, r1
; CHECK-NEXT: mov r0, sp
; CHECK-NEXT: vadd.f32 s0, s4, s0
; CHECK-NEXT: vadd.f32 s0, s0, s4
; CHECK-NEXT: vadd.f32 s0, s0, s5
; CHECK-NEXT: vadd.f32 s0, s0, s6
; CHECK-NEXT: vadd.f32 s0, s0, s7
@ -158,10 +275,38 @@ define float @test_v16f32(<16 x float> %a) nounwind {
; CHECK-NEXT: vadd.f32 s0, s0, s7
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
; CHECK-NEXT: .p2align 2
; CHECK-NEXT: @ %bb.1:
; CHECK-NEXT: .LCPI6_0:
; CHECK-NEXT: .long 0x00000000 @ float 0
%b = call float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a)
%b = call float @llvm.vector.reduce.fadd.f32.v16f32(float %s, <16 x float> %a)
ret float %b
}
define float @test_v16f32_neutral(<16 x float> %a) nounwind {
; CHECK-LABEL: test_v16f32_neutral:
; CHECK: @ %bb.0:
; CHECK-NEXT: vmov d1, r2, r3
; CHECK-NEXT: vmov d0, r0, r1
; CHECK-NEXT: mov r0, sp
; CHECK-NEXT: vadd.f32 s4, s0, s1
; CHECK-NEXT: vadd.f32 s4, s4, s2
; CHECK-NEXT: vadd.f32 s0, s4, s3
; CHECK-NEXT: vld1.64 {d2, d3}, [r0]
; CHECK-NEXT: add r0, sp, #16
; CHECK-NEXT: vadd.f32 s0, s0, s4
; CHECK-NEXT: vadd.f32 s0, s0, s5
; CHECK-NEXT: vadd.f32 s0, s0, s6
; CHECK-NEXT: vadd.f32 s0, s0, s7
; CHECK-NEXT: vld1.64 {d2, d3}, [r0]
; CHECK-NEXT: add r0, sp, #32
; CHECK-NEXT: vadd.f32 s0, s0, s4
; CHECK-NEXT: vadd.f32 s0, s0, s5
; CHECK-NEXT: vadd.f32 s0, s0, s6
; CHECK-NEXT: vadd.f32 s0, s0, s7
; CHECK-NEXT: vld1.64 {d2, d3}, [r0]
; CHECK-NEXT: vadd.f32 s0, s0, s4
; CHECK-NEXT: vadd.f32 s0, s0, s5
; CHECK-NEXT: vadd.f32 s0, s0, s6
; CHECK-NEXT: vadd.f32 s0, s0, s7
; CHECK-NEXT: vmov r0, s0
; CHECK-NEXT: mov pc, lr
%b = call float @llvm.vector.reduce.fadd.f32.v16f32(float -0.0, <16 x float> %a)
ret float %b
}

@ -338,107 +338,121 @@ define float @test_v16f32(float %a0, <16 x float> %a1) {
define float @test_v2f32_zero(<2 x float> %a0) {
; SSE2-LABEL: test_v2f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,1,1]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v2f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v2f32_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
; AVX1-SLOW-LABEL: test_v2f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v2f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v2f32_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f32_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call float @llvm.vector.reduce.fadd.f32.v2f32(float 0.0, <2 x float> %a0)
%1 = call float @llvm.vector.reduce.fadd.f32.v2f32(float -0.0, <2 x float> %a0)
ret float %1
}
define float @test_v4f32_zero(<4 x float> %a0) {
; SSE2-LABEL: test_v4f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm1, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm1
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE2-NEXT: addss %xmm1, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm1
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE2-NEXT: addss %xmm2, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: retq
;
; SSE41-LABEL: test_v4f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm1, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm1
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE41-NEXT: addss %xmm1, %xmm2
; SSE41-NEXT: movaps %xmm0, %xmm1
; SSE41-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE41-NEXT: addss %xmm2, %xmm1
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v4f32_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
; AVX1-SLOW-LABEL: test_v4f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v4f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-FAST-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v4f32_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f32_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: retq
%1 = call float @llvm.vector.reduce.fadd.f32.v4f32(float 0.0, <4 x float> %a0)
%1 = call float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a0)
ret float %1
}
define float @test_v8f32_zero(<8 x float> %a0) {
; SSE2-LABEL: test_v8f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm2, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm2
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[1,1]
; SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE2-NEXT: addss %xmm2, %xmm3
; SSE2-NEXT: movaps %xmm0, %xmm2
; SSE2-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE2-NEXT: addss %xmm3, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: addss %xmm2, %xmm0
; SSE2-NEXT: addss %xmm3, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
@ -452,15 +466,13 @@ define float @test_v8f32_zero(<8 x float> %a0) {
;
; SSE41-LABEL: test_v8f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm2, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm2
; SSE41-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE41-NEXT: movaps %xmm0, %xmm3
; SSE41-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE41-NEXT: addss %xmm2, %xmm3
; SSE41-NEXT: movaps %xmm0, %xmm2
; SSE41-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE41-NEXT: addss %xmm3, %xmm2
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: addss %xmm2, %xmm0
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm2, %xmm0
@ -471,33 +483,66 @@ define float @test_v8f32_zero(<8 x float> %a0) {
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v8f32_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
; AVX1-SLOW-LABEL: test_v8f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v8f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm1
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-FAST-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX1-FAST-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-FAST-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v8f32_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vaddss %xmm0, %xmm1, %xmm1
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f32_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
@ -512,23 +557,21 @@ define float @test_v8f32_zero(<8 x float> %a0) {
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call float @llvm.vector.reduce.fadd.f32.v8f32(float 0.0, <8 x float> %a0)
%1 = call float @llvm.vector.reduce.fadd.f32.v8f32(float -0.0, <8 x float> %a0)
ret float %1
}
define float @test_v16f32_zero(<16 x float> %a0) {
; SSE2-LABEL: test_v16f32_zero:
; SSE2: # %bb.0:
; SSE2-NEXT: xorps %xmm4, %xmm4
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm0[1,1]
; SSE2-NEXT: addss %xmm0, %xmm4
; SSE2-NEXT: movaps %xmm0, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm0[1,1]
; SSE2-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE2-NEXT: addss %xmm4, %xmm5
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE2-NEXT: addss %xmm5, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: addss %xmm4, %xmm0
; SSE2-NEXT: addss %xmm5, %xmm0
; SSE2-NEXT: addss %xmm1, %xmm0
; SSE2-NEXT: movaps %xmm1, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
@ -560,15 +603,13 @@ define float @test_v16f32_zero(<16 x float> %a0) {
;
; SSE41-LABEL: test_v16f32_zero:
; SSE41: # %bb.0:
; SSE41-NEXT: xorps %xmm4, %xmm4
; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; SSE41-NEXT: addss %xmm0, %xmm4
; SSE41-NEXT: movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE41-NEXT: movaps %xmm0, %xmm5
; SSE41-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm0[1]
; SSE41-NEXT: addss %xmm4, %xmm5
; SSE41-NEXT: movaps %xmm0, %xmm4
; SSE41-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE41-NEXT: addss %xmm5, %xmm4
; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE41-NEXT: addss %xmm4, %xmm0
; SSE41-NEXT: addss %xmm5, %xmm0
; SSE41-NEXT: addss %xmm1, %xmm0
; SSE41-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
; SSE41-NEXT: addss %xmm4, %xmm0
@ -595,48 +636,111 @@ define float @test_v16f32_zero(<16 x float> %a0) {
; SSE41-NEXT: addss %xmm3, %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: test_v16f32_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm2
; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX-NEXT: vaddss %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
; AVX1-SLOW-LABEL: test_v16f32_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm2
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm0, %xmm2, %xmm0
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX1-SLOW-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v16f32_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm2
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX1-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm2
; AVX1-FAST-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX1-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX1-FAST-NEXT: vaddss %xmm0, %xmm2, %xmm0
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX1-FAST-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v16f32_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX2-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpermilps {{.*#+}} xmm3 = xmm0[3,3,3,3]
; AVX2-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vaddss %xmm0, %xmm2, %xmm2
; AVX2-NEXT: vmovshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; AVX2-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX2-NEXT: vaddss %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,3,3]
; AVX2-NEXT: vaddss %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[3,3,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddss %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,3,3,3]
; AVX2-NEXT: vaddss %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f32_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddss %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilps {{.*#+}} xmm2 = xmm0[3,3,3,3]
@ -667,7 +771,7 @@ define float @test_v16f32_zero(<16 x float> %a0) {
; AVX512-NEXT: vaddss %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call float @llvm.vector.reduce.fadd.f32.v16f32(float 0.0, <16 x float> %a0)
%1 = call float @llvm.vector.reduce.fadd.f32.v16f32(float -0.0, <16 x float> %a0)
ret float %1
}
@ -1240,116 +1344,171 @@ define double @test_v16f64(double %a0, <16 x double> %a1) {
define double @test_v2f64_zero(<2 x double> %a0) {
; SSE-LABEL: test_v2f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm1, %xmm1
; SSE-NEXT: movapd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: movapd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v2f64_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
; AVX1-SLOW-LABEL: test_v2f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v2f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm0
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v2f64_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v2f64_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%1 = call double @llvm.vector.reduce.fadd.f64.v2f64(double 0.0, <2 x double> %a0)
%1 = call double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %a0)
ret double %1
}
define double @test_v4f64_zero(<4 x double> %a0) {
; SSE-LABEL: test_v4f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm2, %xmm2
; SSE-NEXT: movapd %xmm0, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm2
; SSE-NEXT: movapd %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v4f64_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
; AVX1-SLOW-LABEL: test_v4f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v4f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm1
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v4f64_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v4f64_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call double @llvm.vector.reduce.fadd.f64.v4f64(double 0.0, <4 x double> %a0)
%1 = call double @llvm.vector.reduce.fadd.f64.v4f64(double -0.0, <4 x double> %a0)
ret double %1
}
define double @test_v8f64_zero(<8 x double> %a0) {
; SSE-LABEL: test_v8f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm4, %xmm4
; SSE-NEXT: movapd %xmm0, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
; SSE-NEXT: addsd %xmm0, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm4, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: addsd %xmm1, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm4
; SSE-NEXT: addsd %xmm2, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: addsd %xmm2, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm2, %xmm4
; SSE-NEXT: addsd %xmm3, %xmm4
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1,1]
; SSE-NEXT: addsd %xmm3, %xmm0
; SSE-NEXT: addsd %xmm3, %xmm4
; SSE-NEXT: movapd %xmm4, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v8f64_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
; AVX1-SLOW-LABEL: test_v8f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm0, %xmm2
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v8f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm2
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v8f64_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm0, %xmm2
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vaddsd %xmm0, %xmm2, %xmm2
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm2 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v8f64_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX512-NEXT: vaddsd %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
@@ -1364,15 +1523,14 @@ define double @test_v8f64_zero(<8 x double> %a0) {
; AVX512-NEXT: vaddsd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call double @llvm.vector.reduce.fadd.f64.v8f64(double 0.0, <8 x double> %a0)
%1 = call double @llvm.vector.reduce.fadd.f64.v8f64(double -0.0, <8 x double> %a0)
ret double %1
}
define double @test_v16f64_zero(<16 x double> %a0) {
; SSE-LABEL: test_v16f64_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorpd %xmm8, %xmm8
; SSE-NEXT: addsd %xmm0, %xmm8
; SSE-NEXT: movapd %xmm0, %xmm8
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: addsd %xmm8, %xmm0
; SSE-NEXT: addsd %xmm1, %xmm0
@@ -1398,46 +1556,105 @@ define double @test_v16f64_zero(<16 x double> %a0) {
; SSE-NEXT: addsd %xmm7, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: test_v16f64_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorpd %xmm4, %xmm4, %xmm4
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm4
; AVX-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm5, %xmm4, %xmm4
; AVX-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm4
; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT: vaddsd %xmm0, %xmm4, %xmm0
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
; AVX1-SLOW-LABEL: test_v16f64_zero:
; AVX1-SLOW: # %bb.0:
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm4, %xmm0, %xmm4
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm4, %xmm4
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm0, %xmm4, %xmm0
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-SLOW-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-SLOW-NEXT: vzeroupper
; AVX1-SLOW-NEXT: retq
;
; AVX1-FAST-LABEL: test_v16f64_zero:
; AVX1-FAST: # %bb.0:
; AVX1-FAST-NEXT: vhaddpd %xmm0, %xmm0, %xmm4
; AVX1-FAST-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm4, %xmm4
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm0, %xmm4, %xmm0
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX1-FAST-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX1-FAST-NEXT: vzeroupper
; AVX1-FAST-NEXT: retq
;
; AVX2-LABEL: test_v16f64_zero:
; AVX2: # %bb.0:
; AVX2-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm4, %xmm0, %xmm4
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX2-NEXT: vaddsd %xmm0, %xmm4, %xmm4
; AVX2-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX2-NEXT: vaddsd %xmm0, %xmm4, %xmm0
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm4, %xmm0, %xmm0
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vaddsd %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm2[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vextractf128 $1, %ymm2, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vaddsd %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm3[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX2-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: test_v16f64_zero:
; AVX512: # %bb.0:
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm2, %xmm0, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX512-NEXT: vaddsd %xmm3, %xmm2, %xmm2
; AVX512-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
@@ -1467,7 +1684,7 @@ define double @test_v16f64_zero(<16 x double> %a0) {
; AVX512-NEXT: vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%1 = call double @llvm.vector.reduce.fadd.f64.v16f64(double 0.0, <16 x double> %a0)
%1 = call double @llvm.vector.reduce.fadd.f64.v16f64(double -0.0, <16 x double> %a0)
ret double %1
}
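
For reference, a minimal IR sketch of the semantics exercised above (the function name is hypothetical and the snippet is not part of this diff): the start value takes part in the reduction, and -0.0 is the only additive identity under IEEE rules, so for a non-"fast" reduction a 0.0 start value would turn an input summing to -0.0 into +0.0.

declare double @llvm.vector.reduce.fadd.f64.v2f64(double, <2 x double>)

; Reassociation is allowed, but the -0.0 start value folds away as the
; neutral element, so only the vector elements determine the result.
define double @neutral_start_sketch(<2 x double> %v) {
  %r = call reassoc double @llvm.vector.reduce.fadd.f64.v2f64(double -0.0, <2 x double> %v)
  ret double %r
}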