llvm-project/polly/test/ScopInfo/non-precise-inv-load-2.ll

; RUN: opt %loadPolly -polly-scops -polly-invariant-load-hoisting=true -analyze < %s | FileCheck %s
; Background on Polly's modeling of zero-extend (zext) instructions, taken
; from the commit "Model zext-extend instructions" (llvm-svn: 267408,
; 2016-04-25):
;
; A zero-extended value can be interpreted as a piecewise-defined signed
; value: if the value was non-negative it stays the same, otherwise it is
; the sum of the original value and 2^n, where n is the bit-width of the
; original (operand) type. Examples:
;
;   zext i8 127 to i32  ->  { [127] }
;   zext i8  -1 to i32  ->  { [256 + (-1)] } = { [255] }
;   zext i8  %v to i32  ->  [v] -> { [v] | v >= 0; [256 + v] | v < 0 }
;
; However, LLVM/ScalarEvolution uses zero-extend (potentially preceded by a
; truncate) to represent some forms of modulo computation. The left-hand
; side of the condition in the loop below results in the SCEV
; "zext i1 <false, +, true>for.body", which is just another description of
; the C expression "i & 1 != 0" or, equivalently, "i % 2 != 0":
;
;   for (i = 0; i < N; i++)
;     if (i & 1 != 0 /* == i % 2 */)
;       /* do something */
;
; If we do not make the modulo explicit but only use the mechanism described
; above, we get the very restrictive assumption "N < 3", because for all
; values of N >= 3 the SCEVAddRecExpr operand of the zero-extend would wrap.
; Alternatively, we can make the modulo in the operand explicit in the
; resulting piecewise function and thereby avoid the assumption on N. For
; the example this results in the following piecewise affine function:
;
;   { [i0] -> [(1)] : 2*floor((-1 + i0)/2) = -1 + i0;
;     [i0] -> [(0)] : 2*floor((i0)/2) = i0 }
;
; To this end we first determine whether the (immediate) operand of the
; zero-extend can wrap and, in case it might, use explicit modulo semantics
; to compute the result instead of emitting non-wrapping assumptions.
;
; Note that operands with large bit-widths are less likely to be negative,
; because that would result in a very large access offset or loop bound
; after the zero-extend. Hence one can optimistically assume the operand to
; be positive and avoid the piecewise definition if the bit-width is bigger
; than some threshold (here MaxZextSmallBitWidth).
;
; We go with a hybrid of the modeling techniques described above: for small
; bit-widths (up to MaxZextSmallBitWidth) the wrapping is modeled explicitly
; with a piecewise-defined function, whereas for bit-widths bigger than
; MaxZextSmallBitWidth overflow assumptions are employed and the "former
; negative" piece is assumed not to exist.
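;
; A sketch (not part of the test's expected output) of how that scheme would
; read the zero-extend in this test: the operand is only 8 bits wide, so,
; taking the signed parameter range from the Context below,
; "zext i8 (c - 1) to i64" corresponds to
;
;   { [c - 1]   : c >= 1   (no wrap);
;     [255 + c] : c <= 0   (the i8 subtraction wrapped) }
;
; The CHECK lines below keep only the non-wrapping piece for the hoisted
; load: the access is MemRef_I[-1 + c], its Execution Context requires
; c > 0, and c <= 0 ends up in the Invalid Context.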
;
;
; CHECK: Invariant Accesses: {
; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 0]
; CHECK-NEXT: [c] -> { Stmt_for_body[i0] -> MemRef_I[-1 + c] };
; CHECK-NEXT: Execution Context: [c] -> { : c > 0 }
; CHECK-NEXT: }
; CHECK-NEXT: Context:
; CHECK-NEXT: [c] -> { : -128 <= c <= 127 }
; CHECK-NEXT: Assumed Context:
; CHECK-NEXT: [c] -> { : }
; CHECK-NEXT: Invalid Context:
; CHECK-NEXT: [c] -> { : c <= 0 }
;
; void f(int *A, int *I, unsigned char c) {
;   for (int i = 0; i < 10; i++)
;     A[i] += I[c - (char)1];
; }
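;
; In the IR below the index expression c - (char)1 shows up as an i8
; subtraction (%sub) that is zero-extended to i64 (%conv), i.e. the kind of
; zext operand whose modeling is sketched in the comment above.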
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

define void @f(i32* %A, i32* %I, i8 zeroext %c) {
entry:
  br label %for.cond

for.cond:                                         ; preds = %for.inc, %entry
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.inc ], [ 0, %entry ]
  %exitcond = icmp ne i64 %indvars.iv, 10
  br i1 %exitcond, label %for.body, label %for.end

for.body:                                         ; preds = %for.cond
  %sub = add i8 %c, -1
  %conv = zext i8 %sub to i64
  %arrayidx = getelementptr inbounds i32, i32* %I, i64 %conv
  %tmp = load i32, i32* %arrayidx, align 4
  %arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
  %tmp1 = load i32, i32* %arrayidx2, align 4
  %add = add nsw i32 %tmp1, %tmp
  store i32 %add, i32* %arrayidx2, align 4
  br label %for.inc

for.inc:                                          ; preds = %for.body
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
  br label %for.cond

for.end:                                          ; preds = %for.cond
  ret void
}