; llvm-project/llvm/test/Analysis/BasicAA/gep-and-alias-64.ll
; RUN: opt -S -basic-aa -gvn < %s | FileCheck %s
; From commit "[BasicAA] Support arbitrary pointer sizes (and fix an overflow
; bug)" (Differential Revision: https://reviews.llvm.org/D38662,
; llvm-svn: 350220, 2019-01-03):
;
; Motivated by the discussion in D38499, BasicAA was updated to support
; arbitrary pointer sizes by switching most remaining non-APInt calculations
; to APInt, sized to the maximum pointer size over all address spaces in the
; data layout. That translation exposed an overflow bug: when
; GetLinearExpression decomposes an expression into C1*V + C2 and the result
; is then multiplied by Scale and distributed to (C1*Scale)*V + C2*Scale, the
; term C2*Scale can overflow even though C1*V + C2 does not for relevant
; values of V. When that happens, later logic draws invalid conclusions from
; the (base) offset value. The 32-bit test gep-and-alias.ll only passed
; because intermediates were 64-bit; this 64-bit variant was miscompiled on
; trunk for the same reason until the overflow bug was fixed. Because i64
; indexing expressions are common even in 32-bit code (e.g. portable code
; using int64_t), BasicAA now always uses at least 64-bit intermediate
; integers (with a command-line option for experimentation). Other potential
; overflow conditions, noted by Eli during review, were left to follow-up
; work. Patch by the commit author with contributions from Michael Ferguson
; (mferguson@cray.com).
; 64-bit pointers: the maximum pointer size described by this data layout is
; 64 bits, so the index arithmetic in @foo exercises overflow of 64-bit
; intermediate offset calculations inside BasicAA (see header comment).
target datalayout = "e-m:o-p:64:64-f64:32:64-f80:128-n8:16:32-S128"
target triple = "x86_64-apple-macosx10.6.0"
; The load and store address in the loop body could alias so the load
; can't be hoisted above the store and out of the loop.
; Pre-opaque-pointer memset intrinsic signature (dest, value, length,
; alignment, isvolatile), used to zero-initialize the alloca in @foo.
declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i32, i1)
; Regression body: each loop iteration stores to pool[%sub] (a runtime-
; dependent index) and loads from pool[42]. BasicAA must report that the two
; addresses may alias so GVN cannot hoist the load above the store and out of
; the loop; the overflow bug described in the header once made the offsets
; look disjoint.
define i64 @foo(i64 %x, i64 %z, i64 %n) {
entry:
; NOTE(review): [59 x i64] is 472 bytes but only 236 are zeroed -- the memset
; size appears inherited from the 32-bit original ([59 x i32] = 236 bytes).
; It is part of the scenario under test, not a typo to fix here.
%pool = alloca [59 x i64], align 4
%tmp = bitcast [59 x i64]* %pool to i8*
call void @llvm.memset.p0i8.i64(i8* nonnull %tmp, i8 0, i64 236, i32 4, i1 false)
; Skip the loop entirely when the trip count %n is zero.
%cmp3 = icmp eq i64 %n, 0
br i1 %cmp3, label %for.end, label %for.body.lr.ph
for.body.lr.ph: ; preds = %entry
; Store index: ((%z + %x) & 0x7FFFFFFFFFFFFFFF) + (-9223372036844814062).
; In the GEP decomposition C1*V + C2 this makes C2*Scale (the constant term
; times 8, the i64 element size) overflow 64 bits -- the exact condition the
; fixed BasicAA bug mishandled (see header comment).
%add = add i64 %z, %x
%and = and i64 %add, 9223372036854775807
%sub = add nsw i64 %and, -9223372036844814062
%arrayidx = getelementptr inbounds [59 x i64], [59 x i64]* %pool, i64 0, i64 %sub
%arrayidx1 = getelementptr inbounds [59 x i64], [59 x i64]* %pool, i64 0, i64 42
br label %for.body
for.body: ; preds = %for.body.lr.ph, %for.body
%i.04 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
; The store and load below may alias, so GVN must keep the load inside the
; loop, ordered after the store.
store i64 %i.04, i64* %arrayidx, align 4
%tmp1 = load i64, i64* %arrayidx1, align 4
%inc = add nuw i64 %i.04, 1
%exitcond = icmp ne i64 %inc, %n
br i1 %exitcond, label %for.body, label %for.end.loopexit
for.end.loopexit: ; preds = %for.body
%lcssa = phi i64 [ %tmp1, %for.body ]
br label %for.end
for.end: ; preds = %for.end.loopexit, %entry
%s = phi i64 [ 0, %entry ], [ %lcssa, %for.end.loopexit ]
; The function must still return the phi %s, i.e. GVN has not replaced the
; loaded value based on a bogus no-alias conclusion.
; CHECK: ret i64 %s
ret i64 %s
}