; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
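
; Tests for DAG-level expansion of @llvm.pow with the constant exponents 1/4
; (sqrt(sqrt(X))) and 1/3 (cbrt) under various fast-math-flag combinations.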

declare float @llvm.pow.f32(float, float)
declare <4 x float> @llvm.pow.v4f32(<4 x float>, <4 x float>)
declare double @llvm.pow.f64(double, double)
declare <2 x double> @llvm.pow.v2f64(<2 x double>, <2 x double>)
declare x86_fp80 @llvm.pow.f80(x86_fp80, x86_fp80)
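
; pow(X, 0.25) --> sqrt(sqrt(X)) needs the nsz, ninf, and afn flags. For f32,
; each sqrt is expanded with an rsqrtss estimate plus one Newton-Raphson
; refinement step, and a compare/mask fixup so an input of 0.0 still yields 0.0.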
define float @pow_f32_one_fourth_fmf(float %x) nounwind {
; CHECK-LABEL: pow_f32_one_fourth_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: rsqrtss %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm0, %xmm2
; CHECK-NEXT: mulss %xmm1, %xmm2
; CHECK-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: movaps %xmm2, %xmm4
; CHECK-NEXT: mulss %xmm3, %xmm4
; CHECK-NEXT: mulss %xmm1, %xmm2
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: addss %xmm1, %xmm2
; CHECK-NEXT: mulss %xmm4, %xmm2
; CHECK-NEXT: xorps %xmm4, %xmm4
; CHECK-NEXT: cmpeqss %xmm4, %xmm0
; CHECK-NEXT: andnps %xmm2, %xmm0
; CHECK-NEXT: xorps %xmm2, %xmm2
; CHECK-NEXT: rsqrtss %xmm0, %xmm2
; CHECK-NEXT: movaps %xmm0, %xmm5
; CHECK-NEXT: mulss %xmm2, %xmm5
; CHECK-NEXT: mulss %xmm5, %xmm3
; CHECK-NEXT: mulss %xmm2, %xmm5
; CHECK-NEXT: addss %xmm1, %xmm5
; CHECK-NEXT: mulss %xmm3, %xmm5
; CHECK-NEXT: cmpeqss %xmm4, %xmm0
; CHECK-NEXT: andnps %xmm5, %xmm0
; CHECK-NEXT: retq
  %r = call nsz ninf afn float @llvm.pow.f32(float %x, float 2.5e-01)
  ret float %r
}
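
; x86 has no reciprocal-sqrt estimate instruction for f64, so the f64 expansion
; is just two dependent sqrtsd instructions.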
define double @pow_f64_one_fourth_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_one_fourth_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: sqrtsd %xmm0, %xmm0
; CHECK-NEXT: sqrtsd %xmm0, %xmm0
; CHECK-NEXT: retq
  %r = call nsz ninf afn double @llvm.pow.f64(double %x, double 2.5e-01)
  ret double %r
}
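
; The v4f32 version uses rsqrtps with the same Newton-Raphson refinement, and a
; cmpneqps/andps mask as the zero-input fixup.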
define <4 x float> @pow_v4f32_one_fourth_fmf(<4 x float> %x) nounwind {
; CHECK-LABEL: pow_v4f32_one_fourth_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: rsqrtps %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm0, %xmm2
; CHECK-NEXT: mulps %xmm1, %xmm2
; CHECK-NEXT: movaps {{.*#+}} xmm3 = [-5.0E-1,-5.0E-1,-5.0E-1,-5.0E-1]
; CHECK-NEXT: movaps %xmm2, %xmm4
; CHECK-NEXT: mulps %xmm3, %xmm4
; CHECK-NEXT: mulps %xmm1, %xmm2
; CHECK-NEXT: movaps {{.*#+}} xmm1 = [-3.0E+0,-3.0E+0,-3.0E+0,-3.0E+0]
; CHECK-NEXT: addps %xmm1, %xmm2
; CHECK-NEXT: mulps %xmm4, %xmm2
; CHECK-NEXT: xorps %xmm4, %xmm4
; CHECK-NEXT: cmpneqps %xmm4, %xmm0
; CHECK-NEXT: andps %xmm2, %xmm0
; CHECK-NEXT: rsqrtps %xmm0, %xmm2
; CHECK-NEXT: movaps %xmm0, %xmm5
; CHECK-NEXT: mulps %xmm2, %xmm5
; CHECK-NEXT: mulps %xmm5, %xmm3
; CHECK-NEXT: mulps %xmm2, %xmm5
; CHECK-NEXT: addps %xmm1, %xmm5
; CHECK-NEXT: mulps %xmm3, %xmm5
; CHECK-NEXT: cmpneqps %xmm4, %xmm0
; CHECK-NEXT: andps %xmm5, %xmm0
; CHECK-NEXT: retq
  %r = call fast <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 2.5e-01, float 2.5e-01, float 2.5e-01, float 2.5e-01>)
  ret <4 x float> %r
}
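
; The v2f64 version is two sqrtpd instructions, matching the scalar f64 case.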
define <2 x double> @pow_v2f64_one_fourth_fmf(<2 x double> %x) nounwind {
; CHECK-LABEL: pow_v2f64_one_fourth_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: sqrtpd %xmm0, %xmm0
; CHECK-NEXT: sqrtpd %xmm0, %xmm0
; CHECK-NEXT: retq
  %r = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.5e-01, double 2.5e-01>)
  ret <2 x double> %r
}
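
; The following "not_enough_fmf" tests each drop at least one of the flags that
; the sqrt(sqrt(X)) expansion needs, so pow stays a libcall. This one is
; missing nsz.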
define float @pow_f32_one_fourth_not_enough_fmf(float %x) nounwind {
; CHECK-LABEL: pow_f32_one_fourth_not_enough_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: jmp powf # TAILCALL
  %r = call afn ninf float @llvm.pow.f32(float %x, float 2.5e-01)
  ret float %r
}
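
; Missing afn.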
define double @pow_f64_one_fourth_not_enough_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_one_fourth_not_enough_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: jmp pow # TAILCALL
  %r = call nsz ninf double @llvm.pow.f64(double %x, double 2.5e-01)
  ret double %r
}
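
; Missing ninf; the unexpanded vector call is scalarized into four powf calls.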
define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind {
; CHECK-LABEL: pow_v4f32_one_fourth_not_enough_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $56, %rsp
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq powf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq powf
; CHECK-NEXT: unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq powf
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: callq powf
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd (%rsp), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $56, %rsp
; CHECK-NEXT: retq
  %r = call afn nsz <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 2.5e-01, float 2.5e-01, float 2.5e-01, float 2.5e-01>)
  ret <4 x float> %r
}
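
; Missing ninf and afn; scalarized into two pow calls.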
define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwind {
; CHECK-LABEL: pow_v2f64_one_fourth_not_enough_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: callq pow
; CHECK-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; CHECK-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
  %r = call nsz nnan reassoc <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.5e-01, double 2.5e-01>)
  ret <2 x double> %r
}
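
; With all 4 of nsz, nnan, ninf, and afn, pow(X, 1/3) becomes a cbrt libcall.
; The exponent is built with fdiv, so it is exactly the value of 1/3 rounded to
; the target type.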
define float @pow_f32_one_third_fmf(float %x) nounwind {
; CHECK-LABEL: pow_f32_one_third_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: jmp cbrtf # TAILCALL
  %one = uitofp i32 1 to float
  %three = uitofp i32 3 to float
  %exp = fdiv float %one, %three
  %r = call nsz nnan ninf afn float @llvm.pow.f32(float %x, float %exp)
  ret float %r
}
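
; Same fold for f64: a tail call to cbrt.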
define double @pow_f64_one_third_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_one_third_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: jmp cbrt # TAILCALL
  %one = uitofp i32 1 to double
  %three = uitofp i32 3 to double
  %exp = fdiv double %one, %three
  %r = call nsz nnan ninf afn double @llvm.pow.f64(double %x, double %exp)
  ret double %r
}

; TODO: We could turn this into cbrtl, but currently we only handle float/double types.
define x86_fp80 @pow_f80_one_third_fmf(x86_fp80 %x) nounwind {
; CHECK-LABEL: pow_f80_one_third_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: subq $40, %rsp
; CHECK-NEXT: fldt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fldt {{.*}}(%rip)
; CHECK-NEXT: fstpt {{[0-9]+}}(%rsp)
; CHECK-NEXT: fstpt (%rsp)
; CHECK-NEXT: callq powl
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: retq
  %one = uitofp i32 1 to x86_fp80
  %three = uitofp i32 3 to x86_fp80
  %exp = fdiv x86_fp80 %one, %three
  %r = call nsz nnan ninf afn x86_fp80 @llvm.pow.f80(x86_fp80 %x, x86_fp80 %exp)
  ret x86_fp80 %r
}

; We might want to allow this to fold as well. The exact hex value for 1/3 as a
; double is 0x3fd5555555555555; the exponent below is off by 1 ULP, so the
; transform does not fire.
define double @pow_f64_not_exactly_one_third_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_not_exactly_one_third_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: jmp pow # TAILCALL
  %r = call nsz nnan ninf afn double @llvm.pow.f64(double %x, double 0x3fd5555555555556)
  ret double %r
}

; We require all 4 of nsz, ninf, nnan, afn; this call is missing nnan.
define double @pow_f64_not_enough_fmf(double %x) nounwind {
; CHECK-LABEL: pow_f64_not_enough_fmf:
; CHECK: # %bb.0:
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: jmp pow # TAILCALL
  %r = call nsz ninf afn double @llvm.pow.f64(double %x, double 0x3fd5555555555555)
  ret double %r
}