; llvm-project/llvm/test/Transforms/InstCombine/fsub.ll
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
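; NOTE: To regenerate the CHECK lines after changing InstCombine, re-run
; utils/update_test_checks.py on this file with a locally built 'opt'; the
; exact invocation depends on your checkout and build layout.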
; PR4374
define float @test1(float %x, float %y) {
; CHECK-LABEL: @test1(
; CHECK-NEXT: [[T1:%.*]] = fsub float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fneg float [[T1]]
; CHECK-NEXT: ret float [[T2]]
;
%t1 = fsub float %x, %y
%t2 = fsub float -0.0, %t1
ret float %t2
}
define float @test1_unary(float %x, float %y) {
; CHECK-LABEL: @test1_unary(
; CHECK-NEXT: [[T1:%.*]] = fsub float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fneg float [[T1]]
; CHECK-NEXT: ret float [[T2]]
;
%t1 = fsub float %x, %y
%t2 = fneg float %t1
ret float %t2
}
; Can't do anything with the test above because -0.0 - 0.0 = -0.0, but if we have nsz:
; -(X - Y) --> Y - X
define float @neg_sub_nsz(float %x, float %y) {
; CHECK-LABEL: @neg_sub_nsz(
; CHECK-NEXT: [[TMP1:%.*]] = fsub nsz float [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret float [[TMP1]]
;
%t1 = fsub float %x, %y
%t2 = fsub nsz float -0.0, %t1
ret float %t2
}
define float @unary_neg_sub_nsz(float %x, float %y) {
; CHECK-LABEL: @unary_neg_sub_nsz(
; CHECK-NEXT: [[T2:%.*]] = fsub nsz float [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: ret float [[T2]]
;
%t1 = fsub float %x, %y
%t2 = fneg nsz float %t1
ret float %t2
}
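; Illustrative sketch, not part of the original checks (the function below and
; its name are added only for this example): with both inputs +0.0, %t1 is
; +0.0, so the strict "fsub -0.0, %t1" yields -0.0, while the nsz-folded form
; "fsub %y, %x" would yield +0.0. That sign-of-zero difference is exactly what
; the nsz flag on the two tests above allows us to ignore.
define float @example_neg_sub_signed_zero_sketch() {
  %t1 = fsub float 0.0, 0.0   ; +0.0
  %t2 = fsub float -0.0, %t1  ; -0.0; the Y - X form would give +0.0
  ret float %t2
}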
; If the subtract has another use, we don't do the transform (even though it
; doesn't increase the IR instruction count) because we assume that fneg is
; easier to analyze and generally cheaper than generic fsub.
declare void @use(float)
declare void @use2(float, double)
define float @neg_sub_nsz_extra_use(float %x, float %y) {
; CHECK-LABEL: @neg_sub_nsz_extra_use(
; CHECK-NEXT: [[T1:%.*]] = fsub float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fneg nsz float [[T1]]
; CHECK-NEXT: call void @use(float [[T1]])
; CHECK-NEXT: ret float [[T2]]
;
%t1 = fsub float %x, %y
%t2 = fsub nsz float -0.0, %t1
call void @use(float %t1)
ret float %t2
}
define float @unary_neg_sub_nsz_extra_use(float %x, float %y) {
; CHECK-LABEL: @unary_neg_sub_nsz_extra_use(
; CHECK-NEXT: [[T1:%.*]] = fsub float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fneg nsz float [[T1]]
; CHECK-NEXT: call void @use(float [[T1]])
; CHECK-NEXT: ret float [[T2]]
;
%t1 = fsub float %x, %y
%t2 = fneg nsz float %t1
call void @use(float %t1)
ret float %t2
}
; With nsz: Z - (X - Y) --> Z + (Y - X)
define float @sub_sub_nsz(float %x, float %y, float %z) {
; CHECK-LABEL: @sub_sub_nsz(
; CHECK-NEXT: [[TMP1:%.*]] = fsub nsz float [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fadd nsz float [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret float [[T2]]
;
%t1 = fsub float %x, %y
%t2 = fsub nsz float %z, %t1
ret float %t2
}
; With nsz and reassoc: Y - ((X * 5) + Y) --> X * -5
define float @sub_add_neg_x(float %x, float %y) {
; CHECK-LABEL: @sub_add_neg_x(
; CHECK-NEXT: [[R:%.*]] = fmul reassoc nsz float [[X:%.*]], -5.000000e+00
; CHECK-NEXT: ret float [[R]]
;
%mul = fmul float %x, 5.000000e+00
%add = fadd float %mul, %y
%r = fsub nsz reassoc float %y, %add
ret float %r
}
; Same as above: if 'Z' is not -0.0, swap fsub operands and convert to fadd.
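; (Worked signed-zero example for that constraint: if Z were -0.0 and
; X == Y == 0.0, then Z - (X - Y) = -0.0 - 0.0 = -0.0, but the swapped form
; (Y - X) + Z = 0.0 + -0.0 = +0.0, so Z must be known not to be -0.0.)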
define float @sub_sub_known_not_negzero(float %x, float %y) {
; CHECK-LABEL: @sub_sub_known_not_negzero(
; CHECK-NEXT: [[TMP1:%.*]] = fsub float [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fadd float [[TMP1]], 4.200000e+01
; CHECK-NEXT: ret float [[T2]]
;
%t1 = fsub float %x, %y
%t2 = fsub float 42.0, %t1
ret float %t2
}
; <rdar://problem/7530098>
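; X - (X + Y) is not folded to -Y here: without reassoc/nsz, the inner fadd
; may round, so the algebraic identity does not hold exactly.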
define double @test2(double %x, double %y) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[T1:%.*]] = fadd double [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fsub double [[X]], [[T1]]
; CHECK-NEXT: ret double [[T2]]
;
%t1 = fadd double %x, %y
%t2 = fsub double %x, %t1
ret double %t2
}
; X - C --> X + (-C)
define float @constant_op1(float %x, float %y) {
; CHECK-LABEL: @constant_op1(
; CHECK-NEXT: [[R:%.*]] = fadd float [[X:%.*]], -4.200000e+01
; CHECK-NEXT: ret float [[R]]
;
%r = fsub float %x, 42.0
ret float %r
}
define <2 x float> @constant_op1_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @constant_op1_vec(
; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[X:%.*]], <float -4.200000e+01, float 4.200000e+01>
; CHECK-NEXT: ret <2 x float> [[R]]
;
%r = fsub <2 x float> %x, <float 42.0, float -42.0>
ret <2 x float> %r
}
define <2 x float> @constant_op1_vec_undef(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @constant_op1_vec_undef(
; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[X:%.*]], <float undef, float 4.200000e+01>
; CHECK-NEXT: ret <2 x float> [[R]]
;
%r = fsub <2 x float> %x, <float undef, float -42.0>
ret <2 x float> %r
}
; X - (-Y) --> X + Y
define float @neg_op1(float %x, float %y) {
; CHECK-LABEL: @neg_op1(
; CHECK-NEXT: [[R:%.*]] = fadd float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret float [[R]]
;
%negy = fsub float -0.0, %y
%r = fsub float %x, %negy
ret float %r
}
define float @unary_neg_op1(float %x, float %y) {
; CHECK-LABEL: @unary_neg_op1(
; CHECK-NEXT: [[R:%.*]] = fadd float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret float [[R]]
;
%negy = fneg float %y
%r = fsub float %x, %negy
ret float %r
}
define <2 x float> @neg_op1_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @neg_op1_vec(
; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret <2 x float> [[R]]
;
%negy = fsub <2 x float> <float -0.0, float -0.0>, %y
%r = fsub <2 x float> %x, %negy
ret <2 x float> %r
}
define <2 x float> @unary_neg_op1_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @unary_neg_op1_vec(
; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret <2 x float> [[R]]
;
%negy = fneg <2 x float> %y
%r = fsub <2 x float> %x, %negy
ret <2 x float> %r
}
define <2 x float> @neg_op1_vec_undef(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @neg_op1_vec_undef(
; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: ret <2 x float> [[R]]
;
%negy = fsub <2 x float> <float -0.0, float undef>, %y
%r = fsub <2 x float> %x, %negy
ret <2 x float> %r
}
; Similar to above - but look through fpext/fptrunc casts to find the fneg.
define double @neg_ext_op1(float %a, double %b) {
; CHECK-LABEL: @neg_ext_op1(
; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double
; CHECK-NEXT: [[T3:%.*]] = fadd double [[TMP1]], [[B:%.*]]
; CHECK-NEXT: ret double [[T3]]
;
%t1 = fsub float -0.0, %a
%t2 = fpext float %t1 to double
%t3 = fsub double %b, %t2
ret double %t3
}
define double @unary_neg_ext_op1(float %a, double %b) {
; CHECK-LABEL: @unary_neg_ext_op1(
; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double
; CHECK-NEXT: [[T3:%.*]] = fadd double [[TMP1]], [[B:%.*]]
; CHECK-NEXT: ret double [[T3]]
;
%t1 = fneg float %a
%t2 = fpext float %t1 to double
%t3 = fsub double %b, %t2
ret double %t3
}
; Verify that vectors work too.
define <2 x float> @neg_trunc_op1(<2 x double> %a, <2 x float> %b) {
; CHECK-LABEL: @neg_trunc_op1(
; CHECK-NEXT: [[TMP1:%.*]] = fptrunc <2 x double> [[A:%.*]] to <2 x float>
; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[TMP1]], [[B:%.*]]
; CHECK-NEXT: ret <2 x float> [[T3]]
;
%t1 = fsub <2 x double> <double -0.0, double -0.0>, %a
%t2 = fptrunc <2 x double> %t1 to <2 x float>
%t3 = fsub <2 x float> %b, %t2
ret <2 x float> %t3
}
define <2 x float> @unary_neg_trunc_op1(<2 x double> %a, <2 x float> %b) {
; CHECK-LABEL: @unary_neg_trunc_op1(
; CHECK-NEXT: [[TMP1:%.*]] = fptrunc <2 x double> [[A:%.*]] to <2 x float>
; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[TMP1]], [[B:%.*]]
; CHECK-NEXT: ret <2 x float> [[T3]]
;
%t1 = fneg <2 x double> %a
%t2 = fptrunc <2 x double> %t1 to <2 x float>
%t3 = fsub <2 x float> %b, %t2
ret <2 x float> %t3
}
; No FMF needed, but they should propagate to the fadd.
define double @neg_ext_op1_fast(float %a, double %b) {
; CHECK-LABEL: @neg_ext_op1_fast(
; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double
; CHECK-NEXT: [[T3:%.*]] = fadd fast double [[TMP1]], [[B:%.*]]
; CHECK-NEXT: ret double [[T3]]
;
%t1 = fsub float -0.0, %a
%t2 = fpext float %t1 to double
%t3 = fsub fast double %b, %t2
ret double %t3
}
define double @unary_neg_ext_op1_fast(float %a, double %b) {
; CHECK-LABEL: @unary_neg_ext_op1_fast(
; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double
; CHECK-NEXT: [[T3:%.*]] = fadd fast double [[TMP1]], [[B:%.*]]
; CHECK-NEXT: ret double [[T3]]
;
%t1 = fneg float %a
%t2 = fpext float %t1 to double
%t3 = fsub fast double %b, %t2
ret double %t3
}
; Extra use should prevent the transform.
define float @neg_ext_op1_extra_use(half %a, float %b) {
; CHECK-LABEL: @neg_ext_op1_extra_use(
; CHECK-NEXT: [[T1:%.*]] = fneg half [[A:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fpext half [[T1]] to float
; CHECK-NEXT: [[T3:%.*]] = fsub float [[B:%.*]], [[T2]]
; CHECK-NEXT: call void @use(float [[T2]])
; CHECK-NEXT: ret float [[T3]]
;
%t1 = fsub half -0.0, %a
%t2 = fpext half %t1 to float
%t3 = fsub float %b, %t2
call void @use(float %t2)
ret float %t3
}
define float @unary_neg_ext_op1_extra_use(half %a, float %b) {
; CHECK-LABEL: @unary_neg_ext_op1_extra_use(
; CHECK-NEXT: [[T1:%.*]] = fneg half [[A:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fpext half [[T1]] to float
; CHECK-NEXT: [[T3:%.*]] = fsub float [[B:%.*]], [[T2]]
; CHECK-NEXT: call void @use(float [[T2]])
; CHECK-NEXT: ret float [[T3]]
;
%t1 = fneg half %a
%t2 = fpext half %t1 to float
%t3 = fsub float %b, %t2
call void @use(float %t2)
ret float %t3
}
; One-use fptrunc is always hoisted above fneg, so the corresponding
; multi-use bug for fptrunc isn't visible with a fold starting from
; the last fsub.
define float @neg_trunc_op1_extra_use(double %a, float %b) {
; CHECK-LABEL: @neg_trunc_op1_extra_use(
; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[A:%.*]] to float
; CHECK-NEXT: [[T2:%.*]] = fneg float [[TMP1]]
; CHECK-NEXT: [[T3:%.*]] = fadd float [[TMP1]], [[B:%.*]]
; CHECK-NEXT: call void @use(float [[T2]])
; CHECK-NEXT: ret float [[T3]]
;
%t1 = fsub double -0.0, %a
%t2 = fptrunc double %t1 to float
%t3 = fsub float %b, %t2
call void @use(float %t2)
ret float %t3
}
define float @unary_neg_trunc_op1_extra_use(double %a, float %b) {
; CHECK-LABEL: @unary_neg_trunc_op1_extra_use(
; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[A:%.*]] to float
; CHECK-NEXT: [[T2:%.*]] = fneg float [[TMP1]]
; CHECK-NEXT: [[T3:%.*]] = fadd float [[TMP1]], [[B:%.*]]
; CHECK-NEXT: call void @use(float [[T2]])
; CHECK-NEXT: ret float [[T3]]
;
%t1 = fneg double %a
%t2 = fptrunc double %t1 to float
%t3 = fsub float %b, %t2
call void @use(float %t2)
ret float %t3
}
; Extra uses should prevent the transform.
define float @neg_trunc_op1_extra_uses(double %a, float %b) {
; CHECK-LABEL: @neg_trunc_op1_extra_uses(
; CHECK-NEXT: [[T1:%.*]] = fneg double [[A:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fptrunc double [[T1]] to float
; CHECK-NEXT: [[T3:%.*]] = fsub float [[B:%.*]], [[T2]]
; CHECK-NEXT: call void @use2(float [[T2]], double [[T1]])
; CHECK-NEXT: ret float [[T3]]
;
%t1 = fsub double -0.0, %a
%t2 = fptrunc double %t1 to float
%t3 = fsub float %b, %t2
call void @use2(float %t2, double %t1)
ret float %t3
}
define float @unary_neg_trunc_op1_extra_uses(double %a, float %b) {
; CHECK-LABEL: @unary_neg_trunc_op1_extra_uses(
; CHECK-NEXT: [[T1:%.*]] = fneg double [[A:%.*]]
; CHECK-NEXT: [[T2:%.*]] = fptrunc double [[T1]] to float
; CHECK-NEXT: [[T3:%.*]] = fsub float [[B:%.*]], [[T2]]
; CHECK-NEXT: call void @use2(float [[T2]], double [[T1]])
; CHECK-NEXT: ret float [[T3]]
;
%t1 = fneg double %a
%t2 = fptrunc double %t1 to float
%t3 = fsub float %b, %t2
call void @use2(float %t2, double %t1)
ret float %t3
}
; Don't negate a constant expression to form fadd and induce infinite looping:
; https://bugs.llvm.org/show_bug.cgi?id=37605
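; (Negating the constant expression would create an fadd whose constant
; operand could be folded straight back into this fsub form, so InstCombine
; could ping-pong between the two shapes.)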
@b = external global i16, align 1
define float @PR37605(float %conv) {
; CHECK-LABEL: @PR37605(
; CHECK-NEXT: [[SUB:%.*]] = fsub float [[CONV:%.*]], bitcast (i32 ptrtoint (i16* @b to i32) to float)
; CHECK-NEXT: ret float [[SUB]]
;
%sub = fsub float %conv, bitcast (i32 ptrtoint (i16* @b to i32) to float)
ret float %sub
}
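; Z - (-X / Y) --> Z + (X / Y), and similarly for the fmul and swapped-operand
; variants below: the inner negation is absorbed by converting the outer fsub
; to an fadd.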
define double @fsub_fdiv_fneg1(double %x, double %y, double %z) {
; CHECK-LABEL: @fsub_fdiv_fneg1(
; CHECK-NEXT: [[TMP1:%.*]] = fdiv double [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = fadd double [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret double [[R]]
;
%neg = fsub double -0.000000e+00, %x
%div = fdiv double %neg, %y
%r = fsub double %z, %div
ret double %r
}
define <2 x double> @fsub_fdiv_fneg2(<2 x double> %x, <2 x double> %y, <2 x double> %z) {
; CHECK-LABEL: @fsub_fdiv_fneg2(
; CHECK-NEXT: [[TMP1:%.*]] = fdiv <2 x double> [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = fadd <2 x double> [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret <2 x double> [[R]]
;
%neg = fsub <2 x double> <double -0.0, double -0.0>, %x
%div = fdiv <2 x double> %y, %neg
%r = fsub <2 x double> %z, %div
ret <2 x double> %r
}
define double @fsub_fmul_fneg1(double %x, double %y, double %z) {
; CHECK-LABEL: @fsub_fmul_fneg1(
; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = fadd double [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret double [[R]]
;
%neg = fsub double -0.000000e+00, %x
%mul = fmul double %neg, %y
%r = fsub double %z, %mul
ret double %r
}
define double @fsub_fmul_fneg2(double %x, double %y, double %z) {
; CHECK-LABEL: @fsub_fmul_fneg2(
; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = fadd double [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret double [[R]]
;
%neg = fsub double -0.000000e+00, %x
%mul = fmul double %y, %neg
%r = fsub double %z, %mul
ret double %r
}
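; If the negated fdiv/fmul result has an extra use, the outer fsub is not
; converted to an fadd; only the "fsub -0.0, X" idiom is canonicalized to fneg.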
define float @fsub_fdiv_fneg1_extra_use(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fdiv_fneg1_extra_use(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: [[DIV:%.*]] = fdiv float [[NEG]], [[Y:%.*]]
; CHECK-NEXT: call void @use(float [[DIV]])
; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[DIV]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
%div = fdiv float %neg, %y
call void @use(float %div)
%r = fsub float %z, %div
ret float %r
}
define float @fsub_fdiv_fneg2_extra_use(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fdiv_fneg2_extra_use(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: [[DIV:%.*]] = fdiv float [[Y:%.*]], [[NEG]]
; CHECK-NEXT: call void @use(float [[DIV]])
; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[DIV]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
%div = fdiv float %y, %neg
call void @use(float %div)
%r = fsub float %z, %div
ret float %r
}
declare void @use_vec(<2 x float>)
define <2 x float> @fsub_fmul_fneg1_extra_use(<2 x float> %x, <2 x float> %y, <2 x float> %z) {
; CHECK-LABEL: @fsub_fmul_fneg1_extra_use(
; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fsub <2 x float> [[Z:%.*]], [[MUL]]
; CHECK-NEXT: ret <2 x float> [[R]]
;
%neg = fsub <2 x float> <float -0.0, float -0.0>, %x
%mul = fmul <2 x float> %neg, %y
call void @use_vec(<2 x float> %mul)
%r = fsub <2 x float> %z, %mul
ret <2 x float> %r
}
define float @fsub_fmul_fneg2_extra_use(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fmul_fneg2_extra_use(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = fmul float [[NEG]], [[Y:%.*]]
; CHECK-NEXT: call void @use(float [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[MUL]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
%mul = fmul float %y, %neg
call void @use(float %mul)
%r = fsub float %z, %mul
ret float %r
}
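; If only the fneg has an extra use, the fold still applies: the fdiv/fmul can
; use X directly, and the fneg is kept just for its other use.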
define float @fsub_fdiv_fneg1_extra_use2(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fdiv_fneg1_extra_use2(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
; CHECK-NEXT: [[TMP1:%.*]] = fdiv float [[X]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
call void @use(float %neg)
%div = fdiv float %neg, %y
%r = fsub float %z, %div
ret float %r
}
define float @fsub_fdiv_fneg2_extra_use2(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fdiv_fneg2_extra_use2(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
; CHECK-NEXT: [[TMP1:%.*]] = fdiv float [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
call void @use(float %neg)
%div = fdiv float %y, %neg
%r = fsub float %z, %div
ret float %r
}
define <2 x float> @fsub_fmul_fneg1_extra_use2(<2 x float> %x, <2 x float> %y, <2 x float> %z) {
; CHECK-LABEL: @fsub_fmul_fneg1_extra_use2(
; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[NEG]])
; CHECK-NEXT: [[TMP1:%.*]] = fmul <2 x float> [[X]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret <2 x float> [[R]]
;
%neg = fsub <2 x float> <float -0.0, float -0.0>, %x
call void @use_vec(<2 x float> %neg)
%mul = fmul <2 x float> %neg, %y
%r = fsub <2 x float> %z, %mul
ret <2 x float> %r
}
define float @fsub_fmul_fneg2_extra_use2(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fmul_fneg2_extra_use2(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[X]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
call void @use(float %neg)
%mul = fmul float %y, %neg
%r = fsub float %z, %mul
ret float %r
}
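; When both the fneg and the fdiv/fmul have extra uses, nothing is folded.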
define float @fsub_fdiv_fneg1_extra_use3(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fdiv_fneg1_extra_use3(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
; CHECK-NEXT: [[DIV:%.*]] = fdiv float [[NEG]], [[Y:%.*]]
; CHECK-NEXT: call void @use(float [[DIV]])
; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[DIV]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
call void @use(float %neg)
%div = fdiv float %neg, %y
call void @use(float %div)
%r = fsub float %z, %div
ret float %r
}
define float @fsub_fdiv_fneg2_extra_use3(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fdiv_fneg2_extra_use3(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
; CHECK-NEXT: [[DIV:%.*]] = fdiv float [[Y:%.*]], [[NEG]]
; CHECK-NEXT: call void @use(float [[DIV]])
; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[DIV]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
call void @use(float %neg)
%div = fdiv float %y, %neg
call void @use(float %div)
%r = fsub float %z, %div
ret float %r
}
define <2 x float> @fsub_fmul_fneg1_extra_use3(<2 x float> %x, <2 x float> %y, <2 x float> %z) {
; CHECK-LABEL: @fsub_fmul_fneg1_extra_use3(
; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[NEG]])
; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]]
; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fsub <2 x float> [[Z:%.*]], [[MUL]]
; CHECK-NEXT: ret <2 x float> [[R]]
;
%neg = fsub <2 x float> <float -0.0, float -0.0>, %x
call void @use_vec(<2 x float> %neg)
%mul = fmul <2 x float> %neg, %y
call void @use_vec(<2 x float> %mul)
%r = fsub <2 x float> %z, %mul
ret <2 x float> %r
}
define float @fsub_fmul_fneg2_extra_use3(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fmul_fneg2_extra_use3(
; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEG]])
; CHECK-NEXT: [[MUL:%.*]] = fmul float [[NEG]], [[Y:%.*]]
; CHECK-NEXT: call void @use(float [[MUL]])
; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[MUL]]
; CHECK-NEXT: ret float [[R]]
;
%neg = fsub float -0.000000e+00, %x
call void @use(float %neg)
%mul = fmul float %y, %neg
call void @use(float %mul)
%r = fsub float %z, %mul
ret float %r
}
; Negative test - can't reassociate without FMF.
define float @fsub_fsub(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fsub(
; CHECK-NEXT: [[XY:%.*]] = fsub float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[XYZ:%.*]] = fsub float [[XY]], [[Z:%.*]]
; CHECK-NEXT: ret float [[XYZ]]
;
%xy = fsub float %x, %y
%xyz = fsub float %xy, %z
ret float %xyz
}
; Negative test - can't reassociate without enough FMF.
define float @fsub_fsub_nsz(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fsub_nsz(
; CHECK-NEXT: [[XY:%.*]] = fsub float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[XYZ:%.*]] = fsub nsz float [[XY]], [[Z:%.*]]
; CHECK-NEXT: ret float [[XYZ]]
;
%xy = fsub float %x, %y
%xyz = fsub nsz float %xy, %z
ret float %xyz
}
; Negative test - can't reassociate without enough FMF.
define float @fsub_fsub_reassoc(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fsub_reassoc(
; CHECK-NEXT: [[XY:%.*]] = fsub float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[XYZ:%.*]] = fsub reassoc float [[XY]], [[Z:%.*]]
; CHECK-NEXT: ret float [[XYZ]]
;
%xy = fsub float %x, %y
%xyz = fsub reassoc float %xy, %z
ret float %xyz
}
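; With both nsz and reassoc: (X - Y) - Z --> X - (Y + Z)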
define float @fsub_fsub_nsz_reassoc(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fsub_nsz_reassoc(
; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[Y:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[XYZ:%.*]] = fsub reassoc nsz float [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret float [[XYZ]]
;
%xy = fsub float %x, %y
%xyz = fsub nsz reassoc float %xy, %z
ret float %xyz
}
define <2 x double> @fsub_fsub_fast_vec(<2 x double> %x, <2 x double> %y, <2 x double> %z) {
; CHECK-LABEL: @fsub_fsub_fast_vec(
; CHECK-NEXT: [[TMP1:%.*]] = fadd fast <2 x double> [[Y:%.*]], [[Z:%.*]]
; CHECK-NEXT: [[XYZ:%.*]] = fsub fast <2 x double> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x double> [[XYZ]]
;
%xy = fsub fast <2 x double> %x, %y
%xyz = fsub fast reassoc <2 x double> %xy, %z
ret <2 x double> %xyz
}
; Negative test - don't reassociate and increase instructions.
define float @fsub_fsub_nsz_reassoc_extra_use(float %x, float %y, float %z) {
; CHECK-LABEL: @fsub_fsub_nsz_reassoc_extra_use(
; CHECK-NEXT: [[XY:%.*]] = fsub float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: call void @use(float [[XY]])
; CHECK-NEXT: [[XYZ:%.*]] = fsub reassoc nsz float [[XY]], [[Z:%.*]]
; CHECK-NEXT: ret float [[XYZ]]
;
%xy = fsub float %x, %y
call void @use(float %xy)
%xyz = fsub nsz reassoc float %xy, %z
ret float %xyz
}
define float @fneg_fsub(float %x, float %y) {
; CHECK-LABEL: @fneg_fsub(
; CHECK-NEXT: [[NEGX:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: [[SUB:%.*]] = fsub float [[NEGX]], [[Y:%.*]]
; CHECK-NEXT: ret float [[SUB]]
;
%negx = fneg float %x
%sub = fsub float %negx, %y
ret float %sub
}
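; With nsz: (-X) - Y --> -(X + Y)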
define float @fneg_fsub_nsz(float %x, float %y) {
; CHECK-LABEL: @fneg_fsub_nsz(
; CHECK-NEXT: [[TMP1:%.*]] = fadd nsz float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SUB:%.*]] = fneg nsz float [[TMP1]]
; CHECK-NEXT: ret float [[SUB]]
;
%negx = fneg float %x
%sub = fsub nsz float %negx, %y
ret float %sub
}
define float @fake_fneg_fsub_fast(float %x, float %y) {
; CHECK-LABEL: @fake_fneg_fsub_fast(
; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SUB:%.*]] = fneg fast float [[TMP1]]
; CHECK-NEXT: ret float [[SUB]]
;
%negx = fsub float -0.0, %x
%sub = fsub fast float %negx, %y
ret float %sub
}
define float @fake_fneg_fsub_fast_extra_use(float %x, float %y) {
; CHECK-LABEL: @fake_fneg_fsub_fast_extra_use(
; CHECK-NEXT: [[NEGX:%.*]] = fneg float [[X:%.*]]
; CHECK-NEXT: call void @use(float [[NEGX]])
; CHECK-NEXT: [[SUB:%.*]] = fsub fast float [[NEGX]], [[Y:%.*]]
; CHECK-NEXT: ret float [[SUB]]
;
%negx = fsub float -0.0, %x
call void @use(float %negx)
%sub = fsub fast float %negx, %y
ret float %sub
}
define <2 x float> @fake_fneg_fsub_vec(<2 x float> %x, <2 x float> %y) {
; CHECK-LABEL: @fake_fneg_fsub_vec(
; CHECK-NEXT: [[TMP1:%.*]] = fadd nsz <2 x float> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[SUB:%.*]] = fneg nsz <2 x float> [[TMP1]]
; CHECK-NEXT: ret <2 x float> [[SUB]]
;
%negx = fsub <2 x float> <float -0.0, float -0.0>, %x
%sub = fsub nsz <2 x float> %negx, %y
ret <2 x float> %sub
}
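; With nsz: (-X) - C --> (-C) - X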
define float @fneg_fsub_constant(float %x) {
; CHECK-LABEL: @fneg_fsub_constant(
; CHECK-NEXT: [[SUB:%.*]] = fsub nsz float -4.200000e+01, [[X:%.*]]
; CHECK-NEXT: ret float [[SUB]]
;
%negx = fneg float %x
%sub = fsub nsz float %negx, 42.0
ret float %sub
}