; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
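; A zext followed by a sext can be collapsed to a single zext, because the
; inner zext already guarantees that the sign bit seen by the sext is zero.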
define i64 @test_sext_zext(i16 %A) {
; CHECK-LABEL: @test_sext_zext(
; CHECK-NEXT:    [[C2:%.*]] = zext i16 %A to i64
; CHECK-NEXT:    ret i64 [[C2]]
;
  %c1 = zext i16 %A to i32
  %c2 = sext i32 %c1 to i64
  ret i64 %c2
}
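; A 'not' (xor with all-ones) of an i1 vector followed by a zext is already in
; its simplest form and is expected to stay as an i1 xor plus a single zext.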
define <2 x i64> @test2(<2 x i1> %A) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i1> %A, <i1 true, i1 true>
; CHECK-NEXT:    [[ZEXT:%.*]] = zext <2 x i1> [[XOR]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
;
  %xor = xor <2 x i1> %A, <i1 true, i1 true>
  %zext = zext <2 x i1> %xor to <2 x i64>
  ret <2 x i64> %zext
}
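; Masking with constants that fit in the narrow type makes the surrounding
; trunc/zext pair redundant; only a single wide 'and' should remain.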
define <2 x i64> @test3(<2 x i64> %A) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i64> %A, <i64 23, i64 42>
; CHECK-NEXT:    ret <2 x i64> [[AND]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %zext = zext <2 x i32> %and to <2 x i64>
  ret <2 x i64> %zext
}
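; Same as test3, but with an additional xor: both logic ops should be performed
; directly in the wide type so the trunc and zext disappear.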
define <2 x i64> @test4(<2 x i64> %A) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i64> [[A:%.*]], <i64 23, i64 42>
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i64> [[AND]], <i64 23, i64 42>
; CHECK-NEXT:    ret <2 x i64> [[XOR]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %xor = xor <2 x i32> %and, <i32 23, i32 42>
  %zext = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext
}
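; The xor with 1 only affects the low bit, so it can be applied to the i1 value
; directly and the two zexts merged into a single zext to i64.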
define i64 @fold_xor_zext_sandwich(i1 %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 %a, true
; CHECK-NEXT:    [[ZEXT2:%.*]] = zext i1 [[TMP1]] to i64
; CHECK-NEXT:    ret i64 [[ZEXT2]]
;
  %zext1 = zext i1 %a to i32
  %xor = xor i32 %zext1, 1
  %zext2 = zext i32 %xor to i64
  ret i64 %zext2
}
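; Vector variant of fold_xor_zext_sandwich.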
define <2 x i64> @fold_xor_zext_sandwich_vec(<2 x i1> %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i1> %a, <i1 true, i1 true>
; CHECK-NEXT:    [[ZEXT2:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT2]]
;
  %zext1 = zext <2 x i1> %a to <2 x i32>
  %xor = xor <2 x i32> %zext1, <i32 1, i32 1>
  %zext2 = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext2
}
; Assert that zexts in and(zext(icmp), zext(icmp)) can be folded.
define i8 @fold_and_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_and_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 %a, %c
; CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT:    ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  ret i8 %5
}
; Assert that zexts in or(zext(icmp), zext(icmp)) can be folded.
define i8 @fold_or_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_or_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 %a, %c
; CHECK-NEXT:    [[TMP3:%.*]] = or i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT:    ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = or i8 %2, %4
  ret i8 %5
}
; Assert that zexts in xor(zext(icmp), zext(icmp)) can be folded.
define i8 @fold_xor_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_xor_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 %a, %c
; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT:    ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = xor i8 %2, %4
  ret i8 %5
}
; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded across
; nested logical operators.
define i8 @fold_nested_logic_zext_icmp(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: @fold_nested_logic_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 %a, %c
; CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 %a, %d
; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = zext i1 [[TMP5]] to i8
; CHECK-NEXT:    ret i8 [[TMP6]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  %6 = icmp eq i64 %a, %d
  %7 = zext i1 %6 to i8
  %8 = or i8 %5, %7
  ret i8 %8
}
; This test is for Integer BitWidth > 64 && BitWidth <= 1024.
define i1024 @sext_zext_apint1(i77 %A) {
; CHECK-LABEL: @sext_zext_apint1(
; CHECK-NEXT:    [[C2:%.*]] = zext i77 %A to i1024
; CHECK-NEXT:    ret i1024 [[C2]]
;
  %c1 = zext i77 %A to i533
  %c2 = sext i533 %c1 to i1024
  ret i1024 %c2
}
; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.
define i47 @sext_zext_apint2(i11 %A) {
; CHECK-LABEL: @sext_zext_apint2(
; CHECK-NEXT:    [[C2:%.*]] = zext i11 %A to i47
; CHECK-NEXT:    ret i47 [[C2]]
;
  %c1 = zext i11 %A to i39
  %c2 = sext i39 %c1 to i47
  ret i47 %c2
}