; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; https://bugs.llvm.org/show_bug.cgi?id=38149

; Pattern:
;   ((%x << MaskedBits) a>> MaskedBits) == %x
; Should be transformed into:
;   (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
; Where KeptBits = bitwidth(%x) - MaskedBits

; ============================================================================ ;
; Basic positive tests
; ============================================================================ ;
; Scalar base case: i8 with 5 masked bits (KeptBits = 3).
; The shl+ashr+icmp sequence folds to add+icmp ult.
define i1 @p0(i8 %x) {
; CHECK-LABEL: @p0(
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X:%.*]], 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
; CHECK-NEXT: ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 5
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}

; Big unusual bit width, https://bugs.llvm.org/show_bug.cgi?id=38204
; Illegal/unusual bit width (i65), MaskedBits = 1 (KeptBits = 64).
; Here the fold produces an add of 2^63 plus a signed compare against -1.
define i1 @pb(i65 %x) {
; CHECK-LABEL: @pb(
; CHECK-NEXT: [[TMP1:%.*]] = add i65 [[X:%.*]], 9223372036854775808
; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i65 [[TMP1]], -1
; CHECK-NEXT: ret i1 [[TMP2]]
;
  %tmp0 = shl i65 %x, 1
  %tmp1 = ashr exact i65 %tmp0, 1
  %tmp2 = icmp eq i65 %x, %tmp1
  ret i1 %tmp2
}

; ============================================================================ ;
; Vector tests
; ============================================================================ ;
; Splat vector version of @p0: same fold applies element-wise.
define <2 x i1> @p1_vec_splat(<2 x i8> %x) {
; CHECK-LABEL: @p1_vec_splat(
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], <i8 4, i8 4>
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult <2 x i8> [[TMP1]], <i8 8, i8 8>
; CHECK-NEXT: ret <2 x i1> [[TMP2]]
;
  %tmp0 = shl <2 x i8> %x, <i8 5, i8 5>
  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 5>
  %tmp2 = icmp eq <2 x i8> %tmp1, %x
  ret <2 x i1> %tmp2
}
; Non-splat shift amounts are not handled; the pattern stays as-is.
define <2 x i1> @p2_vec_nonsplat(<2 x i8> %x) {
; CHECK-LABEL: @p2_vec_nonsplat(
; CHECK-NEXT: [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 6>
; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 6>
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i8> [[TMP1]], [[X]]
; CHECK-NEXT: ret <2 x i1> [[TMP2]]
;
  %tmp0 = shl <2 x i8> %x, <i8 5, i8 6>
  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 6>
  %tmp2 = icmp eq <2 x i8> %tmp1, %x
  ret <2 x i1> %tmp2
}
; Undef lane in the shl amount only; currently not folded.
define <3 x i1> @p3_vec_undef0(<3 x i8> %x) {
; CHECK-LABEL: @p3_vec_undef0(
; CHECK-NEXT: [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 undef, i8 5>
; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 5, i8 5>
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
; CHECK-NEXT: ret <3 x i1> [[TMP2]]
;
  %tmp0 = shl <3 x i8> %x, <i8 5, i8 undef, i8 5>
  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 5, i8 5>
  %tmp2 = icmp eq <3 x i8> %tmp1, %x
  ret <3 x i1> %tmp2
}
; Undef lane in the ashr amount only; currently not folded.
define <3 x i1> @p4_vec_undef1(<3 x i8> %x) {
; CHECK-LABEL: @p4_vec_undef1(
; CHECK-NEXT: [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 5, i8 5>
; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 undef, i8 5>
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
; CHECK-NEXT: ret <3 x i1> [[TMP2]]
;
  %tmp0 = shl <3 x i8> %x, <i8 5, i8 5, i8 5>
  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 undef, i8 5>
  %tmp2 = icmp eq <3 x i8> %tmp1, %x
  ret <3 x i1> %tmp2
}
; Undef lane in both shift amounts (matching position); currently not folded.
define <3 x i1> @p5_vec_undef2(<3 x i8> %x) {
; CHECK-LABEL: @p5_vec_undef2(
; CHECK-NEXT: [[TMP0:%.*]] = shl <3 x i8> [[X:%.*]], <i8 5, i8 undef, i8 5>
; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 undef, i8 5>
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
; CHECK-NEXT: ret <3 x i1> [[TMP2]]
;
  %tmp0 = shl <3 x i8> %x, <i8 5, i8 undef, i8 5>
  %tmp1 = ashr exact <3 x i8> %tmp0, <i8 5, i8 undef, i8 5>
  %tmp2 = icmp eq <3 x i8> %tmp1, %x
  ret <3 x i1> %tmp2
}

; ============================================================================ ;
; Commutativity tests.
; ============================================================================ ;
; External (opaque) i8 producer, called in @c0 so the compared value is not a
; function argument and the icmp operand order can be exercised.
declare i8 @gen8()
; Commuted icmp operands (%x on the LHS); the fold must still fire.
define i1 @c0() {
; CHECK-LABEL: @c0(
; CHECK-NEXT: [[X:%.*]] = call i8 @gen8()
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
; CHECK-NEXT: ret i1 [[TMP2]]
;
  %x = call i8 @gen8()
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 5
  %tmp2 = icmp eq i8 %x, %tmp1 ; swapped order
  ret i1 %tmp2
}

; ============================================================================ ;
; One-use tests.
; ============================================================================ ;
; Opaque sink used to give intermediate values an extra use in the
; one-use tests below.
declare void @use8(i8)
; Extra use of the shl only: the fold still fires (the shl is kept for the
; call, and the add+icmp replaces the rest).
define i1 @n_oneuse0(i8 %x) {
; CHECK-LABEL: @n_oneuse0(
; CHECK-NEXT: [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT: call void @use8(i8 [[TMP0]])
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X]], 4
; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
; CHECK-NEXT: ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  call void @use8(i8 %tmp0)
  %tmp1 = ashr exact i8 %tmp0, 5
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; Extra use of the ashr: the whole chain must stay, no fold.
define i1 @n_oneuse1(i8 %x) {
; CHECK-LABEL: @n_oneuse1(
; CHECK-NEXT: [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
; CHECK-NEXT: call void @use8(i8 [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 5
  call void @use8(i8 %tmp1)
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; Extra uses of both the shl and the ashr: no fold.
define i1 @n_oneuse2(i8 %x) {
; CHECK-LABEL: @n_oneuse2(
; CHECK-NEXT: [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT: call void @use8(i8 [[TMP0]])
; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
; CHECK-NEXT: call void @use8(i8 [[TMP1]])
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  call void @use8(i8 %tmp0)
  %tmp1 = ashr exact i8 %tmp0, 5
  call void @use8(i8 %tmp1)
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}

; ============================================================================ ;
; Negative tests
; ============================================================================ ;
; Mismatched shift amounts (5 vs 3): not the pattern, no fold.
define i1 @n0(i8 %x) {
; CHECK-LABEL: @n0(
; CHECK-NEXT: [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 3
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 3 ; not 5
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; lshr instead of ashr: not the signed-truncation pattern. A different
; (unsigned) fold collapses it straight to one icmp ult.
define i1 @n1(i8 %x) {
; CHECK-LABEL: @n1(
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i8 [[X:%.*]], 8
; CHECK-NEXT: ret i1 [[TMP1]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = lshr exact i8 %tmp0, 5 ; not ashr
  %tmp2 = icmp eq i8 %tmp1, %x
  ret i1 %tmp2
}
; Compared against a different value (%y, not the original %x): no fold.
define i1 @n2(i8 %x, i8 %y) {
; CHECK-LABEL: @n2(
; CHECK-NEXT: [[TMP0:%.*]] = shl i8 [[X:%.*]], 5
; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i8 [[TMP0]], 5
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]]
; CHECK-NEXT: ret i1 [[TMP2]]
;
  %tmp0 = shl i8 %x, 5
  %tmp1 = ashr exact i8 %tmp0, 5
  %tmp2 = icmp eq i8 %tmp1, %y ; not %x
  ret i1 %tmp2
}
; Vector with mismatched per-lane shift amounts (5 vs 3 in lane 1): no fold.
define <2 x i1> @n3_vec_nonsplat(<2 x i8> %x) {
; CHECK-LABEL: @n3_vec_nonsplat(
; CHECK-NEXT: [[TMP0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 5>
; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 3>
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i8> [[TMP1]], [[X]]
; CHECK-NEXT: ret <2 x i1> [[TMP2]]
;
  %tmp0 = shl <2 x i8> %x, <i8 5, i8 5>
  %tmp1 = ashr exact <2 x i8> %tmp0, <i8 5, i8 3> ; 3 instead of 5
  %tmp2 = icmp eq <2 x i8> %tmp1, %x
  ret <2 x i1> %tmp2
}