[InstCombine] Fold icmp ugt/ult (add nuw X, C2), C --> icmp ugt/ult X, (C - C2)

Support for sgt/slt was added in rL294898; this adds the same fold for the corresponding unsigned compares (ugt/ult).
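As an illustration (not part of the original commit message), the fold rewrites an unsigned compare of a no-unsigned-wrap add against a constant into a compare of the add operand against the adjusted constant, e.g.

  %a = add nuw i32 %x, 3
  %c = icmp ult i32 %a, 12
    =>
  %c = icmp ult i32 %x, 9   ; 12 - 3 = 9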

This is the Alive proof: https://rise4fun.com/Alive/nyY

Differential Revision: https://reviews.llvm.org/D50972

llvm-svn: 341353
Nicola Zaghen 2018-09-04 10:29:48 +00:00
parent ef16ea7f7a
commit 9588ad9611
6 changed files with 49 additions and 19 deletions


@@ -2338,12 +2338,15 @@ Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
   CmpInst::Predicate Pred = Cmp.getPredicate();
 
   // If the add does not wrap, we can always adjust the compare by subtracting
-  // the constants. Equality comparisons are handled elsewhere. SGE/SLE are
-  // canonicalized to SGT/SLT.
-  if (Add->hasNoSignedWrap() &&
-      (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) {
+  // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
+  // are canonicalized to SGT/SLT/UGT/ULT.
+  if ((Add->hasNoSignedWrap() &&
+       (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
+      (Add->hasNoUnsignedWrap() &&
+       (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
     bool Overflow;
-    APInt NewC = C.ssub_ov(*C2, Overflow);
+    APInt NewC =
+        Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
     // If there is overflow, the result must be true or false.
     // TODO: Can we assert there is no overflow because InstSimplify always
     // handles those cases?
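A note on the Overflow flag (my gloss, not text from the patch): if C - C2 wraps in the unsigned (or signed) domain, no adjusted compare exists and the result is a constant. For example, with an nuw add the sum is always at least C2, so

  %a = add nuw i32 %x, 71
  %c = icmp ult i32 %a, 3

can only be false; this is the case exercised by the new @ult_add_ssubov test below.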


@@ -8,7 +8,7 @@ define void @test() #0 {
 ; CHECK: for.body:
 ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 40
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp ult i64 [[INDVARS_IV]], 39
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
 ; CHECK: for.end:
 ; CHECK-NEXT: ret void


@@ -283,8 +283,7 @@ define i1 @slt_zero_add_nuw_signbit(i8 %x) {
 
 define i1 @reduce_add_ult(i32 %in) {
 ; CHECK-LABEL: @reduce_add_ult(
-; CHECK-NEXT: [[A6:%.*]] = add nuw i32 [[IN:%.*]], 3
-; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[A6]], 12
+; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[IN:%.*]], 9
 ; CHECK-NEXT: ret i1 [[A18]]
 ;
 %a6 = add nuw i32 %in, 3
@@ -294,8 +293,7 @@ define i1 @reduce_add_ult(i32 %in) {
 
 define i1 @reduce_add_ugt(i32 %in) {
 ; CHECK-LABEL: @reduce_add_ugt(
-; CHECK-NEXT: [[A6:%.*]] = add nuw i32 [[IN:%.*]], 3
-; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[A6]], 12
+; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[IN:%.*]], 9
 ; CHECK-NEXT: ret i1 [[A18]]
 ;
 %a6 = add nuw i32 %in, 3
@@ -305,8 +303,7 @@ define i1 @reduce_add_ugt(i32 %in) {
 
 define i1 @reduce_add_ule(i32 %in) {
 ; CHECK-LABEL: @reduce_add_ule(
-; CHECK-NEXT: [[A6:%.*]] = add nuw i32 [[IN:%.*]], 3
-; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[A6]], 13
+; CHECK-NEXT: [[A18:%.*]] = icmp ult i32 [[IN:%.*]], 10
 ; CHECK-NEXT: ret i1 [[A18]]
 ;
 %a6 = add nuw i32 %in, 3
@@ -316,11 +313,41 @@ define i1 @reduce_add_ule(i32 %in) {
 
 define i1 @reduce_add_uge(i32 %in) {
 ; CHECK-LABEL: @reduce_add_uge(
-; CHECK-NEXT: [[A6:%.*]] = add nuw i32 [[IN:%.*]], 3
-; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[A6]], 11
+; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[IN:%.*]], 8
 ; CHECK-NEXT: ret i1 [[A18]]
 ;
 %a6 = add nuw i32 %in, 3
 %a18 = icmp uge i32 %a6, 12
 ret i1 %a18
 }
+
+define i1 @ult_add_ssubov(i32 %in) {
+; CHECK-LABEL: @ult_add_ssubov(
+; CHECK-NEXT: ret i1 false
+;
+%a6 = add nuw i32 %in, 71
+%a18 = icmp ult i32 %a6, 3
+ret i1 %a18
+}
+
+define i1 @ult_add_nonuw(i8 %in) {
+; CHECK-LABEL: @ult_add_nonuw(
+; CHECK-NEXT: [[A6:%.*]] = add i8 [[IN:%.*]], 71
+; CHECK-NEXT: [[A18:%.*]] = icmp ult i8 [[A6]], 12
+; CHECK-NEXT: ret i1 [[A18]]
+;
+%a6 = add i8 %in, 71
+%a18 = icmp ult i8 %a6, 12
+ret i1 %a18
+}
+
+define i1 @uge_add_nonuw(i32 %in) {
+; CHECK-LABEL: @uge_add_nonuw(
+; CHECK-NEXT: [[A6:%.*]] = add i32 [[IN:%.*]], 3
+; CHECK-NEXT: [[A18:%.*]] = icmp ugt i32 [[A6]], 11
+; CHECK-NEXT: ret i1 [[A18]]
+;
+%a6 = add i32 %in, 3
+%a18 = icmp uge i32 %a6, 12
+ret i1 %a18
+}
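For context (an illustration, not from the patch): the nuw flag is what makes the constant adjustment sound, which is why @ult_add_nonuw and @uge_add_nonuw above are left alone. Without nuw the add may wrap, e.g. at i8:

  %a = add i8 %x, 71        ; with %x = 5, %a = 76
  %c = icmp ult i8 %a, 12   ; 76 ult 12 is false
  ; but the would-be fold, icmp ult i8 %x, 197, is true for %x = 5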


@@ -1878,7 +1878,7 @@ define void @foo4(double* %A, double* %B, i32* %trigger) {
 ; AVX1-NEXT: br label [[FOR_INC]]
 ; AVX1: for.inc:
 ; AVX1-NEXT: [[INDVARS_IV_NEXT:%.*]] = or i64 [[INDVARS_IV]], 16
-; AVX1-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000
+; AVX1-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV]], 9984
 ; AVX1-NEXT: br i1 [[CMP]], label [[FOR_BODY_1:%.*]], label [[FOR_END:%.*]]
 ; AVX1: for.end:
 ; AVX1-NEXT: ret void
@@ -1920,7 +1920,7 @@ define void @foo4(double* %A, double* %B, i32* %trigger) {
 ; AVX2-NEXT: br label [[FOR_INC]]
 ; AVX2: for.inc:
 ; AVX2-NEXT: [[INDVARS_IV_NEXT:%.*]] = or i64 [[INDVARS_IV]], 16
-; AVX2-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT]], 10000
+; AVX2-NEXT: [[CMP:%.*]] = icmp ult i64 [[INDVARS_IV]], 9984
 ; AVX2-NEXT: br i1 [[CMP]], label [[FOR_BODY_1:%.*]], label [[FOR_END:%.*]]
 ; AVX2: for.end:
 ; AVX2-NEXT: ret void
@@ -2119,7 +2119,7 @@ define void @foo4(double* %A, double* %B, i32* %trigger) {
 ; AVX512-NEXT: br label [[FOR_INC_3]]
 ; AVX512: for.inc.3:
 ; AVX512-NEXT: [[INDVARS_IV_NEXT_3]] = add nsw i64 [[INDVARS_IV]], 64
-; AVX512-NEXT: [[CMP_3:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT_3]], 10000
+; AVX512-NEXT: [[CMP_3:%.*]] = icmp ult i64 [[INDVARS_IV_NEXT_2]], 9984
 ; AVX512-NEXT: br i1 [[CMP_3]], label [[FOR_BODY]], label [[FOR_END]], !llvm.loop !52
 ;
 entry:


@@ -12,7 +12,7 @@ define i32 @foo(i32* nocapture %A, i32* nocapture %B, i32 %n) {
 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1
 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 3
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]]
 ; CHECK: vector.memcheck:
 ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[N]], -1


@@ -18,7 +18,7 @@ define i32 @foo(float* nocapture %a, float* nocapture %b, i32 %n) nounwind uwtab
 ; CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1, !dbg !9
 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64, !dbg !9
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1, !dbg !9
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4, !dbg !9
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP0]], 3, !dbg !9
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]], !dbg !9
 ; CHECK: vector.memcheck:
 ; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[N]], -1, !dbg !9