; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; https://bugs.llvm.org/show_bug.cgi?id=37603

; Pattern:
;   (1 << NBits) - 1
; Should be transformed into:
;   ~(-(1 << NBits))
; The `not` may end up being folded into `and`
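;
; Illustrative aside (not part of the autogenerated checks): for NBits = 3,
; (1 << 3) - 1 = 8 - 1 = 7 = 0b0111, while -(1 << 3) = -8 = 0b...11111000 in
; two's complement, so ~(-(1 << 3)) = 0b0111 = 7 as well. Both forms build the
; same low-bit mask; the canonical form ends in `xor %x, -1`, which a later
; `and` user can absorb (e.g. into a single and-not style instruction).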

; ============================================================================ ;
; Most basic positive tests
; ============================================================================ ;

; No no-wrap tags on shl

define i32 @shl_add(i32 %NBits) {
; CHECK-LABEL: @shl_add(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl i32 1, %NBits
  %ret = add i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_add_nsw(i32 %NBits) {
; CHECK-LABEL: @shl_add_nsw(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl i32 1, %NBits
  %ret = add nsw i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_add_nuw(i32 %NBits) {
; CHECK-LABEL: @shl_add_nuw(
; CHECK-NEXT:    ret i32 -1
;
  %setbit = shl i32 1, %NBits
  %ret = add nuw i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_add_nsw_nuw(i32 %NBits) {
; CHECK-LABEL: @shl_add_nsw_nuw(
; CHECK-NEXT:    ret i32 -1
;
  %setbit = shl i32 1, %NBits
  %ret = add nuw nsw i32 %setbit, -1
  ret i32 %ret
}

; shl is nsw

define i32 @shl_nsw_add(i32 %NBits) {
; CHECK-LABEL: @shl_nsw_add(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl nsw i32 1, %NBits
  %ret = add i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_nsw_add_nsw(i32 %NBits) {
; CHECK-LABEL: @shl_nsw_add_nsw(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl nsw i32 1, %NBits
  %ret = add nsw i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_nsw_add_nuw(i32 %NBits) {
; CHECK-LABEL: @shl_nsw_add_nuw(
; CHECK-NEXT:    ret i32 -1
;
  %setbit = shl nsw i32 1, %NBits
  %ret = add nuw i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_nsw_add_nsw_nuw(i32 %NBits) {
; CHECK-LABEL: @shl_nsw_add_nsw_nuw(
; CHECK-NEXT:    ret i32 -1
;
  %setbit = shl nsw i32 1, %NBits
  %ret = add nuw nsw i32 %setbit, -1
  ret i32 %ret
}

; shl is nuw

define i32 @shl_nuw_add(i32 %NBits) {
; CHECK-LABEL: @shl_nuw_add(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl nuw i32 1, %NBits
  %ret = add i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_nuw_add_nsw(i32 %NBits) {
; CHECK-LABEL: @shl_nuw_add_nsw(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl nuw i32 1, %NBits
  %ret = add nsw i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_nuw_add_nuw(i32 %NBits) {
; CHECK-LABEL: @shl_nuw_add_nuw(
; CHECK-NEXT:    ret i32 -1
;
  %setbit = shl nuw i32 1, %NBits
  %ret = add nuw i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_nuw_add_nsw_nuw(i32 %NBits) {
; CHECK-LABEL: @shl_nuw_add_nsw_nuw(
; CHECK-NEXT:    ret i32 -1
;
  %setbit = shl nuw i32 1, %NBits
  %ret = add nuw nsw i32 %setbit, -1
  ret i32 %ret
}

; shl is nuw nsw

define i32 @shl_nsw_nuw_add(i32 %NBits) {
; CHECK-LABEL: @shl_nsw_nuw_add(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl nuw nsw i32 1, %NBits
  %ret = add i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_nsw_nuw_add_nsw(i32 %NBits) {
; CHECK-LABEL: @shl_nsw_nuw_add_nsw(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw i32 -1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor i32 [[NOTMASK]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl nuw nsw i32 1, %NBits
  %ret = add nsw i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_nsw_nuw_add_nuw(i32 %NBits) {
; CHECK-LABEL: @shl_nsw_nuw_add_nuw(
; CHECK-NEXT:    ret i32 -1
;
  %setbit = shl nuw nsw i32 1, %NBits
  %ret = add nuw i32 %setbit, -1
  ret i32 %ret
}

define i32 @shl_nsw_nuw_add_nsw_nuw(i32 %NBits) {
; CHECK-LABEL: @shl_nsw_nuw_add_nsw_nuw(
; CHECK-NEXT:    ret i32 -1
;
  %setbit = shl nuw nsw i32 1, %NBits
  %ret = add nuw nsw i32 %setbit, -1
  ret i32 %ret
}

; ============================================================================ ;
; Vectors
; ============================================================================ ;

define <2 x i32> @shl_add_vec(<2 x i32> %NBits) {
; CHECK-LABEL: @shl_add_vec(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <2 x i32> <i32 -1, i32 -1>, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor <2 x i32> [[NOTMASK]], <i32 -1, i32 -1>
; CHECK-NEXT:    ret <2 x i32> [[RET]]
;
  %setbit = shl <2 x i32> <i32 1, i32 1>, %NBits
  %ret = add <2 x i32> %setbit, <i32 -1, i32 -1>
  ret <2 x i32> %ret
}

define <3 x i32> @shl_add_vec_undef0(<3 x i32> %NBits) {
; CHECK-LABEL: @shl_add_vec_undef0(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], <i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    ret <3 x i32> [[RET]]
;
  %setbit = shl <3 x i32> <i32 1, i32 undef, i32 1>, %NBits
  %ret = add <3 x i32> %setbit, <i32 -1, i32 -1, i32 -1>
  ret <3 x i32> %ret
}

define <3 x i32> @shl_add_vec_undef1(<3 x i32> %NBits) {
; CHECK-LABEL: @shl_add_vec_undef1(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], <i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    ret <3 x i32> [[RET]]
;
  %setbit = shl <3 x i32> <i32 1, i32 1, i32 1>, %NBits
  %ret = add <3 x i32> %setbit, <i32 -1, i32 undef, i32 -1>
  ret <3 x i32> %ret
}

define <3 x i32> @shl_add_vec_undef2(<3 x i32> %NBits) {
; CHECK-LABEL: @shl_add_vec_undef2(
; CHECK-NEXT:    [[NOTMASK:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = xor <3 x i32> [[NOTMASK]], <i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    ret <3 x i32> [[RET]]
;
  %setbit = shl <3 x i32> <i32 1, i32 undef, i32 1>, %NBits
  %ret = add <3 x i32> %setbit, <i32 -1, i32 undef, i32 -1>
  ret <3 x i32> %ret
}

; ============================================================================ ;
; Negative tests. Should not be folded.
; ============================================================================ ;

declare void @use32(i32)

; One use only.
define i32 @bad_oneuse0(i32 %NBits) {
; CHECK-LABEL: @bad_oneuse0(
; CHECK-NEXT:    [[SETBIT:%.*]] = shl i32 1, [[NBITS:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[SETBIT]])
; CHECK-NEXT:    [[RET:%.*]] = add i32 [[SETBIT]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl i32 1, %NBits
  call void @use32(i32 %setbit)
  %ret = add i32 %setbit, -1
  ret i32 %ret
}

; shift base is not `1` constant
define i32 @bad_shl(i32 %base, i32 %NBits) {
; CHECK-LABEL: @bad_shl(
; CHECK-NEXT:    [[SETBIT:%.*]] = shl i32 [[BASE:%.*]], [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = add i32 [[SETBIT]], -1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl i32 %base, %NBits ; %base instead of 1
  %ret = add i32 %setbit, -1
  ret i32 %ret
}

; Second `add` operand is not `-1` constant
define i32 @bad_add0(i32 %NBits, i32 %addop2) {
; CHECK-LABEL: @bad_add0(
; CHECK-NEXT:    [[SETBIT:%.*]] = shl i32 1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = add i32 [[SETBIT]], [[ADDOP2:%.*]]
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl i32 1, %NBits
  %ret = add i32 %setbit, %addop2
  ret i32 %ret
}

; Bad add constant
define i32 @bad_add1(i32 %NBits) {
; CHECK-LABEL: @bad_add1(
; CHECK-NEXT:    [[SETBIT:%.*]] = shl i32 1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = add i32 [[SETBIT]], 1
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl i32 1, %NBits
  %ret = add i32 %setbit, 1 ; not -1
  ret i32 %ret
}

define i32 @bad_add2(i32 %NBits) {
; CHECK-LABEL: @bad_add2(
; CHECK-NEXT:    [[SETBIT:%.*]] = shl i32 1, [[NBITS:%.*]]
; CHECK-NEXT:    [[RET:%.*]] = add i32 [[SETBIT]], -2
; CHECK-NEXT:    ret i32 [[RET]]
;
  %setbit = shl i32 1, %NBits
  %ret = add i32 %setbit, -2 ; not -1
  ret i32 %ret
}