Teach lib/VMCore/ConstantFold.cpp how to set the inbounds keyword and how to
fold notionally-out-of-bounds array getelementptr indices, instead of just
doing these in lib/Analysis/ConstantFolding.cpp, because it can be done in a
fairly general way without TargetData, and because not all constants are
visited by lib/Analysis/ConstantFolding.cpp. This enables more constant
folding.

Also, set the "inbounds" flag when the getelementptr indices are
one-past-the-end.

llvm-svn: 81483
commit 21c6216c87 (parent c91aeac18b)
@@ -207,12 +207,8 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
   if (Offset != 0)
     return 0;
 
-  // If the base is the start of a GlobalVariable and all the array indices
-  // remain in their static bounds, the GEP is inbounds. We can check that
-  // all indices are in bounds by just checking the first index only
-  // because we've just normalized all the indices.
-  Constant *C = isa<GlobalVariable>(Ptr) && NewIdxs[0]->isNullValue() ?
-    ConstantExpr::getInBoundsGetElementPtr(Ptr, &NewIdxs[0], NewIdxs.size()) :
+  // Create a GEP.
+  Constant *C =
     ConstantExpr::getGetElementPtr(Ptr, &NewIdxs[0], NewIdxs.size());
   assert(cast<PointerType>(C->getType())->getElementType() == Ty &&
          "Computed GetElementPtr has unexpected type!");
@@ -12,9 +12,8 @@
 // ConstantExpr::get* methods to automatically fold constants when possible.
 //
 // The current constant folding implementation is implemented in two pieces: the
-// template-based folder for simple primitive constants like ConstantInt, and
-// the special case hackery that we use to symbolically evaluate expressions
-// that use ConstantExprs.
+// pieces that don't need TargetData, and the pieces that do. This is to avoid
+// a dependence in VMCore on Target.
 //
 //===----------------------------------------------------------------------===//
 
@@ -24,6 +23,7 @@
 #include "llvm/DerivedTypes.h"
 #include "llvm/Function.h"
 #include "llvm/GlobalAlias.h"
+#include "llvm/GlobalVariable.h"
 #include "llvm/LLVMContext.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/Compiler.h"
@@ -1673,8 +1673,28 @@ Constant *llvm::ConstantFoldCompareInstruction(LLVMContext &Context,
   return 0;
 }
 
+/// isInBoundsIndices - Test whether the given sequence of *normalized* indices
+/// is "inbounds".
+static bool isInBoundsIndices(Constant *const *Idxs, size_t NumIdx) {
+  // No indices means nothing that could be out of bounds.
+  if (NumIdx == 0) return true;
+
+  // If the first index is zero, it's in bounds.
+  if (Idxs[0]->isNullValue()) return true;
+
+  // If the first index is one and all the rest are zero, it's in bounds,
+  // by the one-past-the-end rule.
+  if (!cast<ConstantInt>(Idxs[0])->isOne())
+    return false;
+  for (unsigned i = 1, e = NumIdx; i != e; ++i)
+    if (!Idxs[i]->isNullValue())
+      return false;
+  return true;
+}
+
 Constant *llvm::ConstantFoldGetElementPtr(LLVMContext &Context,
                                           const Constant *C,
+                                          bool inBounds,
                                           Constant* const *Idxs,
                                           unsigned NumIdx) {
   if (NumIdx == 0 ||
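The helper added above encodes a simple rule: normalized indices are known inbounds if they start inside the object, or point exactly one element past its end. Below is a standalone restatement over plain integers (a sketch, not part of the commit), with cases drawn from the tests further down: test37's one-past-the-end GEP on @A37, and the folded @B indices, which start at 36 and therefore do not earn the keyword.

// Sketch only: isInBoundsIndices restated over plain int64_t indices.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

static bool isInBoundsIndices(const std::vector<int64_t> &Idxs) {
  if (Idxs.empty()) return true;     // no indices, nothing can be out of bounds
  if (Idxs[0] == 0) return true;     // starts inside the pointed-to object
  if (Idxs[0] != 1) return false;    // anything else overshoots...
  for (std::size_t i = 1; i < Idxs.size(); ++i)
    if (Idxs[i] != 0) return false;  // ...unless it is exactly one-past-the-end
  return true;
}

int main() {
  assert(isInBoundsIndices({0, 0, 2, 1, 5}));    // in range, starts at the object
  assert(isInBoundsIndices({1, 0}));             // one-past-the-end (test37's @A37 GEP)
  assert(!isInBoundsIndices({1, 1}));            // past the end and then some
  assert(!isInBoundsIndices({36, 0, 1, 0, 5}));  // @B's folded indices: 36 objects past @A
  return 0;
}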
@@ -1746,9 +1766,13 @@ Constant *llvm::ConstantFoldGetElementPtr(LLVMContext &Context,
 
       NewIndices.push_back(Combined);
       NewIndices.insert(NewIndices.end(), Idxs+1, Idxs+NumIdx);
-      return ConstantExpr::getGetElementPtr(CE->getOperand(0),
-                                            &NewIndices[0],
-                                            NewIndices.size());
+      return (inBounds && cast<GEPOperator>(CE)->isInBounds()) ?
+        ConstantExpr::getInBoundsGetElementPtr(CE->getOperand(0),
+                                               &NewIndices[0],
+                                               NewIndices.size()) :
+        ConstantExpr::getGetElementPtr(CE->getOperand(0),
+                                       &NewIndices[0],
+                                       NewIndices.size());
     }
   }
 
@@ -1764,7 +1788,10 @@ Constant *llvm::ConstantFoldGetElementPtr(LLVMContext &Context,
     if (const ArrayType *CAT =
       dyn_cast<ArrayType>(cast<PointerType>(C->getType())->getElementType()))
       if (CAT->getElementType() == SAT->getElementType())
-        return ConstantExpr::getGetElementPtr(
+        return inBounds ?
+          ConstantExpr::getInBoundsGetElementPtr(
+            (Constant*)CE->getOperand(0), Idxs, NumIdx) :
+          ConstantExpr::getGetElementPtr(
           (Constant*)CE->getOperand(0), Idxs, NumIdx);
   }
 
@@ -1789,5 +1816,71 @@ Constant *llvm::ConstantFoldGetElementPtr(LLVMContext &Context,
       return ConstantExpr::getIntToPtr(Base, CE->getType());
     }
   }
+
+  // Check to see if any array indices are not within the corresponding
+  // notional array bounds. If so, try to determine if they can be factored
+  // out into preceding dimensions.
+  bool Unknown = false;
+  SmallVector<Constant *, 8> NewIdxs;
+  const Type *Ty = C->getType();
+  const Type *Prev = 0;
+  for (unsigned i = 0; i != NumIdx;
+       Prev = Ty, Ty = cast<CompositeType>(Ty)->getTypeAtIndex(Idxs[i]), ++i) {
+    if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
+      if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty))
+        if (ATy->getNumElements() <= INT64_MAX &&
+            ATy->getNumElements() != 0 &&
+            CI->getSExtValue() >= (int64_t)ATy->getNumElements()) {
+          if (isa<SequentialType>(Prev)) {
+            // It's out of range, but we can factor it into the prior
+            // dimension.
+            NewIdxs.resize(NumIdx);
+            ConstantInt *Factor = ConstantInt::get(CI->getType(),
+                                                   ATy->getNumElements());
+            NewIdxs[i] = ConstantExpr::getSRem(CI, Factor);
+
+            Constant *PrevIdx = Idxs[i-1];
+            Constant *Div = ConstantExpr::getSDiv(CI, Factor);
+
+            // Before adding, extend both operands to i64 to avoid
+            // overflow trouble.
+            if (PrevIdx->getType() != Type::getInt64Ty(Context))
+              PrevIdx = ConstantExpr::getSExt(PrevIdx,
+                                              Type::getInt64Ty(Context));
+            if (Div->getType() != Type::getInt64Ty(Context))
+              Div = ConstantExpr::getSExt(Div,
+                                          Type::getInt64Ty(Context));
+
+            NewIdxs[i-1] = ConstantExpr::getAdd(PrevIdx, Div);
+          } else {
+            // It's out of range, but the prior dimension is a struct
+            // so we can't do anything about it.
+            Unknown = true;
+          }
+        }
+    } else {
+      // We don't know if it's in range or not.
+      Unknown = true;
+    }
+  }
+
+  // If we did any factoring, start over with the adjusted indices.
+  if (!NewIdxs.empty()) {
+    for (unsigned i = 0; i != NumIdx; ++i)
+      if (!NewIdxs[i]) NewIdxs[i] = Idxs[i];
+    return inBounds ?
+      ConstantExpr::getGetElementPtr(const_cast<Constant*>(C),
+                                     NewIdxs.data(), NewIdxs.size()) :
+      ConstantExpr::getInBoundsGetElementPtr(const_cast<Constant*>(C),
+                                             NewIdxs.data(), NewIdxs.size());
+  }
+
+  // If all indices are known integers and normalized, we can do a simple
+  // check for the "inbounds" property.
+  if (!Unknown && !inBounds &&
+      isa<GlobalVariable>(C) && isInBoundsIndices(Idxs, NumIdx))
+    return ConstantExpr::getInBoundsGetElementPtr(const_cast<Constant*>(C),
+                                                  Idxs, NumIdx);
+
   return 0;
 }
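The factoring loop added above is easier to follow on concrete numbers. The sketch below (plain standalone C++, not part of the commit) applies the same divide-and-carry step as the getSRem/getSDiv/getAdd sequence, but it iterates to a fixed point instead of re-folding through the ConstantExpr::get* entry points, and it assumes every enclosing dimension is an array, where the committed code checks isa<SequentialType>(Prev) and gives up on structs. The bounds and indices are those of the @B case in the tests below; the printed result matches that test's CHECK line.

// Sketch only: normalize over-large array indices by carrying into the
// previous dimension, the way ConstantFoldGetElementPtr now does.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

static void normalizeIndices(std::vector<int64_t> &Idxs,
                             const std::vector<int64_t> &Bounds) {
  // Idxs[0] indexes the pointer operand and has no bound; each later Idxs[i]
  // indexes an array of Bounds[i] elements. Whenever an index is too large,
  // keep the remainder and carry the quotient into the previous dimension,
  // repeating until every index is in range.
  bool Changed = true;
  while (Changed) {
    Changed = false;
    for (std::size_t i = 1; i < Idxs.size(); ++i) {
      if (Idxs[i] < Bounds[i])
        continue;
      Idxs[i - 1] += Idxs[i] / Bounds[i];
      Idxs[i] %= Bounds[i];
      Changed = true;
    }
  }
}

int main() {
  // getelementptr [2 x [3 x [5 x [7 x i32]]]]* @A, i64 0, i64 0, i64 2, i64 1, i64 7523
  std::vector<int64_t> Idxs = {0, 0, 2, 1, 7523};
  std::vector<int64_t> Bounds = {0, 2, 3, 5, 7}; // Bounds[0] is unused
  normalizeIndices(Idxs, Bounds);
  for (int64_t I : Idxs)
    std::printf("%lld ", (long long)I); // prints: 36 0 1 0 5
  std::printf("\n");
  return 0;
}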
@@ -64,6 +64,7 @@ namespace llvm {
                                       const Constant *C1,
                                       const Constant *C2);
   Constant *ConstantFoldGetElementPtr(LLVMContext &Context, const Constant *C,
+                                      bool inBounds,
                                       Constant* const *Idxs, unsigned NumIdx);
 } // End llvm namespace
 
@@ -1489,7 +1489,8 @@ Constant *ConstantExpr::getGetElementPtrTy(const Type *ReqTy, Constant *C,
          "GEP indices invalid!");
 
   if (Constant *FC = ConstantFoldGetElementPtr(
-                              ReqTy->getContext(), C, (Constant**)Idxs, NumIdx))
+                              ReqTy->getContext(), C, /*inBounds=*/false,
+                              (Constant**)Idxs, NumIdx))
     return FC; // Fold a few common cases...
 
   assert(isa<PointerType>(C->getType()) &&
@@ -1518,7 +1519,8 @@ Constant *ConstantExpr::getInBoundsGetElementPtrTy(const Type *ReqTy,
          "GEP indices invalid!");
 
   if (Constant *FC = ConstantFoldGetElementPtr(
-                              ReqTy->getContext(), C, (Constant**)Idxs, NumIdx))
+                              ReqTy->getContext(), C, /*inBounds=*/true,
+                              (Constant**)Idxs, NumIdx))
     return FC; // Fold a few common cases...
 
   assert(isa<PointerType>(C->getType()) &&
@@ -1,11 +1,21 @@
-; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis
+; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
 
+; Verify that over-indexed getelementptrs are folded.
+@A = external global [2 x [3 x [5 x [7 x i32]]]]
+@B = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 0, i64 0, i64 2, i64 1, i64 7523)
+; CHECK: @B = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 36, i64 0, i64 1, i64 0, i64 5) ; <i32**> [#uses=0]
+@C = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 3, i64 2, i64 0, i64 0, i64 7523)
+; CHECK: @C = global i32* getelementptr ([2 x [3 x [5 x [7 x i32]]]]* @A, i64 39, i64 1, i64 1, i64 4, i64 5) ; <i32**> [#uses=0]
+
 ;; Verify that i16 indices work.
 @x = external global {i32, i32}
 @y = global i32* getelementptr ({i32, i32}* @x, i16 42, i32 0)
+; CHECK: @y = global i32* getelementptr (%0* @x, i16 42, i32 0)
 
 ; see if i92 indices work too.
 define i32 *@test({i32, i32}* %t, i92 %n) {
+; CHECK: @test
+; CHECK: %B = getelementptr %0* %t, i92 %n, i32 0
   %B = getelementptr {i32, i32}* %t, i92 %n, i32 0
   ret i32* %B
 }
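The CHECK line for @B above can be reproduced by hand. Each over-large index is split against its array bound, and the quotient is carried into the enclosing dimension, innermost first:

  7523 = 1074 * 7 + 5   ->  last index becomes 5,  carry 1074 into the [5 x ...] index: 1 + 1074 = 1075
  1075 =  215 * 5 + 0   ->  index becomes 0,       carry 215 into the [3 x ...] index:  2 + 215 = 217
   217 =   72 * 3 + 1   ->  index becomes 1,       carry 72 into the [2 x ...] index:   0 + 72 = 72
    72 =   36 * 2 + 0   ->  index becomes 0,       carry 36 into the pointer index:     0 + 36 = 36

This yields (i64 36, i64 0, i64 1, i64 0, i64 5), exactly the folded form the new CHECK expects. The first index is 36, not 0 or 1, so the result is not marked inbounds.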
@@ -44,7 +44,11 @@ define void @frob() {
   store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 16), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 0, i64 2, i32 1, i64 2), align 8
   store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 17), align 8
-; CHECK: store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 1, i64 0, i32 0, i64 0), align 8
+; CHECK: store i32 1, i32* getelementptr inbounds ([3 x %struct.X]* @Y, i64 1, i64 0, i32 0, i64 0), align 8
   store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 18), align 8
+; CHECK: store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 2, i64 0, i32 0, i64 0), align 8
+  store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 36), align 8
+; CHECK: store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 1, i64 0, i32 0, i64 1), align 8
   store i32 1, i32* getelementptr ([3 x %struct.X]* @Y, i64 0, i64 0, i32 0, i64 19), align 8
   ret void
 }
@@ -222,7 +222,7 @@ define i1 @test22() {
             getelementptr (i32* @B, i64 2)
   ret i1 %C
 ; CHECK: @test22
-; CHECK: icmp ult (i32* getelementptr (i32* @A, i64 1), i32* getelementptr (i32* @B, i64 2))
+; CHECK: icmp ult (i32* getelementptr inbounds (i32* @A, i64 1), i32* getelementptr (i32* @B, i64 2))
 }
 
 
@@ -463,7 +463,7 @@ define i8* @test36() nounwind {
 @A37 = external constant [1 x i8]
 define i1 @test37() nounwind {
 ; CHECK: @test37
-; CHECK: ret i1 icmp eq (i8* getelementptr ([1 x i8]* @A37, i64 0, i64 1), i8* getelementptr ([1 x i8]* @A37, i64 1, i64 0))
+; CHECK: ret i1 true
   %t = icmp eq i8* getelementptr ([1 x i8]* @A37, i64 0, i64 1),
        getelementptr ([1 x i8]* @A37, i64 1, i64 0)
   ret i1 %t