[stack-safety] Local analysis implementation

Summary:
Analysis produces StackSafetyInfo, which contains information about how allocas
and parameters are used in functions.

From a prototype by Evgenii Stepanov and Vlad Tsyrklevich.

Reviewers: eugenis, vlad.tsyrklevich, pcc, glider

Subscribers: hiraditya, llvm-commits

Differential Revision: https://reviews.llvm.org/D54504

llvm-svn: 347603
This commit is contained in:
Vitaly Buka 2018-11-26 21:57:59 +00:00
parent 4493fe1c1b
commit fa98c074b7
4 changed files with 929 additions and 8 deletions

View File

@ -19,8 +19,22 @@
namespace llvm {
/// Interface to access stack safety analysis results for single function.
class StackSafetyInfo {
public:
// Opaque per-function analysis result; defined in StackSafetyAnalysis.cpp.
struct FunctionInfo;
private:
// unique_ptr to an incomplete type: requires the out-of-line special members
// declared below, and makes the type move-only.
std::unique_ptr<FunctionInfo> Info;
public:
StackSafetyInfo();
StackSafetyInfo(FunctionInfo &&Info);
StackSafetyInfo(StackSafetyInfo &&);
StackSafetyInfo &operator=(StackSafetyInfo &&);
~StackSafetyInfo();
// TODO: Add useful for client methods.
// Print the per-function use summary (delegates to FunctionInfo::print).
void print(raw_ostream &O) const;
};

View File

@ -10,21 +10,388 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/StackSafetyAnalysis.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
#define DEBUG_TYPE "stack-safety"
// Unique key identifying this analysis to the new pass manager.
AnalysisKey StackSafetyAnalysis::Key;
namespace {
// NOTE(review): a stale stub definition of StackSafetyInfo::print (printing
// "Not Implemented") was removed here. It duplicated the real out-of-line
// definition near the bottom of this file, and a member of
// llvm::StackSafetyInfo cannot legally be defined inside this anonymous
// namespace in the first place.
/// Rewrite an SCEV expression for a memory access address to an expression that
/// represents offset from the given alloca.
class AllocaOffsetRewriter : public SCEVRewriteVisitor<AllocaOffsetRewriter> {
// The alloca (or argument) whose occurrences get replaced with zero.
const Value *AllocaPtr;
public:
AllocaOffsetRewriter(ScalarEvolution &SE, const Value *AllocaPtr)
: SCEVRewriteVisitor(SE), AllocaPtr(AllocaPtr) {}
const SCEV *visit(const SCEV *Expr) {
// Only re-write the expression if the alloca is used in an addition
// expression (it can be used in other types of expressions if it's cast to
// an int and passed as an argument.)
if (!isa<SCEVAddRecExpr>(Expr) && !isa<SCEVAddExpr>(Expr) &&
!isa<SCEVUnknown>(Expr))
return Expr;
return SCEVRewriteVisitor<AllocaOffsetRewriter>::visit(Expr);
}
// Replace the alloca pointer itself with 0, so that after rewriting the
// whole expression evaluates to the offset from the alloca.
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
// FIXME: look through one or several levels of definitions?
// This can be inttoptr(AllocaPtr) and SCEV would not unwrap
// it for us.
if (Expr->getValue() == AllocaPtr)
return SE.getZero(Expr->getType());
return Expr;
}
};
/// Describes a use of an address as a function call argument.
struct PassAsArgInfo {
/// Function being called.
const GlobalValue *Callee = nullptr;
/// Index of the argument which passes the address.
size_t ParamNo = 0;
// Offset range of address from base address (alloca or calling function
// argument).
// Range should never set to empty-set, that is an invalid access range
// that can cause empty-set to be propagated with ConstantRange::add
ConstantRange Offset;
PassAsArgInfo(const GlobalValue *Callee, size_t ParamNo, ConstantRange Offset)
: Callee(Callee), ParamNo(ParamNo), Offset(Offset) {}
StringRef getName() const { return Callee->getName(); }
};
/// Print a call-argument use as "@callee(argN, <offset-range>)".
raw_ostream &operator<<(raw_ostream &OS, const PassAsArgInfo &P) {
  OS << "@" << P.getName() << "(arg" << P.ParamNo << ", " << P.Offset << ")";
  return OS;
}
/// Describe uses of address (alloca or parameter) inside of the function.
struct UseInfo {
// Access range of the address (alloca or parameter).
// It is allowed to be empty-set when there are no known accesses.
ConstantRange Range;
// List of calls which pass address as an argument.
SmallVector<PassAsArgInfo, 4> Calls;
// Starts with an empty-set range of the given pointer bit-width.
explicit UseInfo(unsigned PointerSize) : Range{PointerSize, false} {}
// Widen the known access range to also cover R.
void updateRange(ConstantRange R) { Range = Range.unionWith(R); }
};
/// Print the access range followed by every call receiving the address.
raw_ostream &operator<<(raw_ostream &OS, const UseInfo &U) {
  OS << U.Range;
  for (const PassAsArgInfo &PA : U.Calls)
    OS << ", " << PA;
  return OS;
}
// Describes an alloca and all uses of its address within the function.
struct AllocaInfo {
const AllocaInst *AI = nullptr;
// Static allocation size in bytes; 0 when it cannot be determined.
uint64_t Size = 0;
// Accesses and calls involving the alloca's address.
UseInfo Use;
AllocaInfo(unsigned PointerSize, const AllocaInst *AI, uint64_t Size)
: AI(AI), Size(Size), Use(PointerSize) {}
StringRef getName() const { return AI->getName(); }
};
/// Print as "name[size]: <uses>".
raw_ostream &operator<<(raw_ostream &OS, const AllocaInfo &A) {
  OS << A.getName() << "[" << A.Size << "]: " << A.Use;
  return OS;
}
// Describes a function argument and all uses of its value as an address.
struct ParamInfo {
const Argument *Arg = nullptr;
UseInfo Use;
explicit ParamInfo(unsigned PointerSize, const Argument *Arg)
: Arg(Arg), Use(PointerSize) {}
// Falls back to "<N/A>" when no Argument is attached.
StringRef getName() const { return Arg ? Arg->getName() : "<N/A>"; }
};
/// Print as "name[]: <uses>".
raw_ostream &operator<<(raw_ostream &OS, const ParamInfo &P) {
  OS << P.getName() << "[]: " << P.Use;
  return OS;
}
/// Calculate the allocation size of a given alloca. Returns 0 if the
/// size can not be statically determined.
uint64_t getStaticAllocaAllocationSize(const AllocaInst *AI) {
  const DataLayout &Layout = AI->getModule()->getDataLayout();
  uint64_t Bytes = Layout.getTypeAllocSize(AI->getAllocatedType());
  if (!AI->isArrayAllocation())
    return Bytes;
  // Scale by the element count; bail out (return 0) when it is not a
  // compile-time constant.
  const auto *NumElts = dyn_cast<ConstantInt>(AI->getArraySize());
  return NumElts ? Bytes * NumElts->getZExtValue() : 0;
}
} // end anonymous namespace
/// Describes uses of allocas and parameters inside of a single function.
struct StackSafetyInfo::FunctionInfo {
  // May be a Function or a GlobalAlias.
  const GlobalValue *GV = nullptr;
  // Information about alloca uses.
  SmallVector<AllocaInfo, 4> Allocas;
  // Information about parameter uses.
  SmallVector<ParamInfo, 4> Params;
  // TODO: describe return value as depending on one or more of its arguments.

  // Deep-copies the FunctionInfo owned by another StackSafetyInfo (uses the
  // private copy constructor below).
  FunctionInfo(const StackSafetyInfo &SSI) : FunctionInfo(*SSI.Info) {}
  explicit FunctionInfo(const Function *F) : GV(F) {}
  FunctionInfo(FunctionInfo &&) = default;

  bool IsDSOLocal() const { return GV->isDSOLocal(); }
  bool IsInterposable() const { return GV->isInterposable(); }
  StringRef getName() const { return GV->getName(); }

  // Print the collected parameter and alloca use summaries.
  void print(raw_ostream &O) const {
    O << " @" << getName() << (IsDSOLocal() ? "" : " dso_preemptable")
      << (IsInterposable() ? " interposable" : "") << "\n";
    O << " args uses:\n";
    for (auto &P : Params)
      O << " " << P << "\n";
    O << " allocas uses:\n";
    for (auto &AS : Allocas)
      O << " " << AS << "\n";
  }

private:
  FunctionInfo(const FunctionInfo &) = default;
};
namespace {
// Computes access ranges for all allocas and arguments of a single function;
// no propagation across function boundaries happens here.
class StackSafetyLocalAnalysis {
const Function &F;
const DataLayout &DL;
ScalarEvolution &SE;
// Pointer width in bits; also the bit-width of every ConstantRange below.
unsigned PointerSize = 0;
// Full-set range, used when an access cannot be bounded.
const ConstantRange UnknownRange;
// Range of possible offsets of Addr relative to AllocaPtr.
ConstantRange offsetFromAlloca(Value *Addr, const Value *AllocaPtr);
// Range of bytes an AccessSize-byte access through Addr may touch,
// relative to AllocaPtr.
ConstantRange getAccessRange(Value *Addr, const Value *AllocaPtr,
uint64_t AccessSize);
ConstantRange getMemIntrinsicAccessRange(const MemIntrinsic *MI, const Use &U,
const Value *AllocaPtr);
bool analyzeAllUses(const Value *Ptr, UseInfo &AS);
// Half-open range [Lower, Upper) of PointerSize bits.
ConstantRange getRange(uint64_t Lower, uint64_t Upper) const {
return ConstantRange(APInt(PointerSize, Lower), APInt(PointerSize, Upper));
}
public:
StackSafetyLocalAnalysis(const Function &F, ScalarEvolution &SE)
: F(F), DL(F.getParent()->getDataLayout()), SE(SE),
PointerSize(DL.getPointerSizeInBits()),
UnknownRange(PointerSize, true) {}
// Run the transformation on the associated function.
StackSafetyInfo run();
};
// Compute the range of possible offsets of Addr relative to AllocaPtr by
// rewriting AllocaPtr to zero inside Addr's SCEV expression.
ConstantRange
StackSafetyLocalAnalysis::offsetFromAlloca(Value *Addr,
const Value *AllocaPtr) {
if (!SE.isSCEVable(Addr->getType()))
return UnknownRange;
AllocaOffsetRewriter Rewriter(SE, AllocaPtr);
const SCEV *Expr = Rewriter.visit(SE.getSCEV(Addr));
// zextOrTrunc normalizes the range to the pointer bit-width.
ConstantRange Offset = SE.getUnsignedRange(Expr).zextOrTrunc(PointerSize);
assert(!Offset.isEmptySet());
return Offset;
}
// Compute the byte range [start, start + AccessSize) that an access through
// Addr may touch, relative to AllocaPtr.
ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
const Value *AllocaPtr,
uint64_t AccessSize) {
if (!SE.isSCEVable(Addr->getType()))
return UnknownRange;
AllocaOffsetRewriter Rewriter(SE, AllocaPtr);
const SCEV *Expr = Rewriter.visit(SE.getSCEV(Addr));
ConstantRange AccessStartRange =
SE.getUnsignedRange(Expr).zextOrTrunc(PointerSize);
// Extend the start-offset range by the access size; [0, AccessSize) is
// never empty, so the sum cannot collapse to empty-set.
ConstantRange SizeRange = getRange(0, AccessSize);
ConstantRange AccessRange = AccessStartRange.add(SizeRange);
assert(!AccessRange.isEmptySet());
return AccessRange;
}
// Access range for a memory intrinsic where U is the use of the address
// being analyzed.
ConstantRange StackSafetyLocalAnalysis::getMemIntrinsicAccessRange(
const MemIntrinsic *MI, const Use &U, const Value *AllocaPtr) {
if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
// U is not one of the accessed pointer operands of the intrinsic:
// record a minimal [0,1) placeholder access.
if (MTI->getRawSource() != U && MTI->getRawDest() != U)
return getRange(0, 1);
} else {
if (MI->getRawDest() != U)
return getRange(0, 1);
}
const auto *Len = dyn_cast<ConstantInt>(MI->getLength());
// Non-constant size => unsafe. FIXME: try SCEV getRange.
if (!Len)
return UnknownRange;
ConstantRange AccessRange = getAccessRange(U, AllocaPtr, Len->getZExtValue());
return AccessRange;
}
/// The function analyzes all local uses of Ptr (alloca or argument) and
/// calculates local access range and all function calls where it was used.
/// Returns false (after widening the range to unknown) when the pointer
/// escapes: it is stored somewhere, returned, or reaches an unknown callee.
bool StackSafetyLocalAnalysis::analyzeAllUses(const Value *Ptr, UseInfo &US) {
SmallPtrSet<const Value *, 16> Visited;
SmallVector<const Value *, 8> WorkList;
WorkList.push_back(Ptr);
// A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
while (!WorkList.empty()) {
const Value *V = WorkList.pop_back_val();
for (const Use &UI : V->uses()) {
auto I = cast<const Instruction>(UI.getUser());
assert(V == UI.get());
switch (I->getOpcode()) {
case Instruction::Load: {
// Reading through the pointer: record the loaded bytes.
US.updateRange(
getAccessRange(UI, Ptr, DL.getTypeStoreSize(I->getType())));
break;
}
case Instruction::VAArg:
// "va-arg" from a pointer is safe.
break;
case Instruction::Store: {
if (V == I->getOperand(0)) {
// Stored the pointer - conservatively assume it may be unsafe.
US.updateRange(UnknownRange);
return false;
}
// Writing through the pointer: record the stored bytes.
US.updateRange(getAccessRange(
UI, Ptr, DL.getTypeStoreSize(I->getOperand(0)->getType())));
break;
}
case Instruction::Ret:
// Information leak.
// FIXME: Process parameters correctly. This is a leak only if we return
// alloca.
US.updateRange(UnknownRange);
return false;
case Instruction::Call:
case Instruction::Invoke: {
ImmutableCallSite CS(I);
// Lifetime markers are bookkeeping, not accesses.
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
II->getIntrinsicID() == Intrinsic::lifetime_end)
break;
}
if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
US.updateRange(getMemIntrinsicAccessRange(MI, UI, Ptr));
break;
}
// FIXME: consult devirt?
// Do not follow aliases, otherwise we could inadvertently follow
// dso_preemptable aliases or aliases with interposable linkage.
const GlobalValue *Callee = dyn_cast<GlobalValue>(
CS.getCalledValue()->stripPointerCastsNoFollowAliases());
if (!Callee) {
US.updateRange(UnknownRange);
return false;
}
assert(isa<Function>(Callee) || isa<GlobalAlias>(Callee));
// Record every argument position at which V is passed, together with
// its offset relative to Ptr.
ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
for (ImmutableCallSite::arg_iterator A = B; A != E; ++A) {
if (A->get() == V) {
ConstantRange Offset = offsetFromAlloca(UI, Ptr);
US.Calls.emplace_back(Callee, A - B, Offset);
}
}
break;
}
default:
// Anything else (bitcast, GEP, PHI, select, ...) may forward the
// pointer value: treat the result as an alias and scan its uses too.
if (Visited.insert(I).second)
WorkList.push_back(cast<const Instruction>(I));
}
}
}
return true;
}
// Collect every alloca and every argument of F and analyze all local uses
// of each, producing the function's StackSafetyInfo.
StackSafetyInfo StackSafetyLocalAnalysis::run() {
StackSafetyInfo::FunctionInfo Info(&F);
assert(!F.isDeclaration() &&
"Can't run StackSafety on a function declaration");
LLVM_DEBUG(dbgs() << "[StackSafety] " << F.getName() << "\n");
for (auto &I : instructions(F)) {
if (auto AI = dyn_cast<AllocaInst>(&I)) {
Info.Allocas.emplace_back(PointerSize, AI,
getStaticAllocaAllocationSize(AI));
AllocaInfo &AS = Info.Allocas.back();
analyzeAllUses(AI, AS.Use);
}
}
// Every argument (pointer or not) gets a ParamInfo entry.
for (const Argument &A : make_range(F.arg_begin(), F.arg_end())) {
Info.Params.emplace_back(PointerSize, &A);
ParamInfo &PS = Info.Params.back();
analyzeAllUses(&A, PS.Use);
}
LLVM_DEBUG(dbgs() << "[StackSafety] done\n");
LLVM_DEBUG(Info.print(dbgs()));
return StackSafetyInfo(std::move(Info));
}
} // end anonymous namespace
// Out-of-line special members: FunctionInfo is an incomplete type in the
// header, so these must be defined here where it is complete.
StackSafetyInfo::StackSafetyInfo() = default;
StackSafetyInfo::StackSafetyInfo(StackSafetyInfo &&) = default;
StackSafetyInfo &StackSafetyInfo::operator=(StackSafetyInfo &&) = default;
StackSafetyInfo::StackSafetyInfo(FunctionInfo &&Info)
: Info(new FunctionInfo(std::move(Info))) {}
StackSafetyInfo::~StackSafetyInfo() = default;
// NOTE(review): print() dereferences Info unconditionally; calling it on a
// default-constructed StackSafetyInfo (null Info) would be UB -- confirm
// callers always populate the result first.
void StackSafetyInfo::print(raw_ostream &O) const { Info->print(O); }
// NOTE(review): a duplicate `AnalysisKey StackSafetyAnalysis::Key;` was
// removed here -- the key is already defined earlier in this file, and a
// second definition of the same object does not link.
/// New-pass-manager entry point: run the local analysis on F.
StackSafetyInfo StackSafetyAnalysis::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  // The stale `return StackSafetyInfo();` stub that previously made the rest
  // of this function unreachable (always returning an empty result) has been
  // dropped.
  StackSafetyLocalAnalysis SSLA(F, AM.getResult<ScalarEvolutionAnalysis>(F));
  return SSLA.run();
}
PreservedAnalyses StackSafetyPrinterPass::run(Function &F,
@ -49,7 +416,12 @@ void StackSafetyInfoWrapperPass::print(raw_ostream &O, const Module *M) const {
SSI.print(O);
}
// Compute and cache stack-safety results for F. (The stale one-line stub
// definition that previously preceded this function was removed -- two
// definitions of the same member function do not compile.)
bool StackSafetyInfoWrapperPass::runOnFunction(Function &F) {
  StackSafetyLocalAnalysis SSLA(
      F, getAnalysis<ScalarEvolutionWrapperPass>().getSE());
  SSI = StackSafetyInfo(SSLA.run());
  // Analysis pass: the IR is never modified.
  return false;
}
// Command-line argument and human-readable name for the legacy-PM pass.
static const char LocalPassArg[] = "stack-safety-local";
static const char LocalPassName[] = "Stack Safety Local Analysis";

View File

@ -4,10 +4,345 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; NOTE(review): removed pre-commit residue: two stale CHECK lines expecting
; the old "Not Implemented" printer output, and an unterminated
; `define dso_local void @Foo()` that conflicted with the `declare @Foo`
; used by the DirectCall test below.

; Global used as an escape sink for leaked pointers.
@sink = global i8* null, align 8

declare void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 %len, i1 %isvolatile)
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 %len, i1 %isvolatile)
declare void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 %len, i1 %isvolatile)
; Address leaked.
define void @LeakAddress() {
; CHECK-LABEL: @LeakAddress dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: full-set{{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
store i8* %x1, i8** @sink, align 8
ret void
}
; One-byte store at offset 0: within the 4-byte alloca.
define void @StoreInBounds() {
; CHECK-LABEL: @StoreInBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,1){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
store i8 0, i8* %x1, align 1
ret void
}
; Four-byte store covering the whole alloca.
define void @StoreInBounds2() {
; CHECK-LABEL: @StoreInBounds2 dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,4){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
store i32 0, i32* %x, align 4
ret void
}
; One-byte store at offset 2: still within bounds.
define void @StoreInBounds3() {
; CHECK-LABEL: @StoreInBounds3 dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [2,3){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 2
store i8 0, i8* %x2, align 1
ret void
}
; FIXME: ScalarEvolution does not look through ptrtoint/inttoptr.
define void @StoreInBounds4() {
; CHECK-LABEL: @StoreInBounds4 dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [2,-1){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = ptrtoint i32* %x to i64
%x2 = add i64 %x1, 2
%x3 = inttoptr i64 %x2 to i8*
store i8 0, i8* %x3, align 1
ret void
}
; Four-byte store at offset 2 overruns the 4-byte alloca.
define void @StoreOutOfBounds() {
; CHECK-LABEL: @StoreOutOfBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [2,6){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 2
%x3 = bitcast i8* %x2 to i32*
store i32 0, i32* %x3, align 1
ret void
}
; There is no difference in load vs store handling.
define void @LoadInBounds() {
; CHECK-LABEL: @LoadInBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,1){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
%v = load i8, i8* %x1, align 1
ret void
}
; Four-byte load at offset 2 overruns the 4-byte alloca.
define void @LoadOutOfBounds() {
; CHECK-LABEL: @LoadOutOfBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [2,6){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 2
%x3 = bitcast i8* %x2 to i32*
%v = load i32, i32* %x3, align 1
ret void
}
; Leak through ret.
define i8* @Ret() {
; CHECK-LABEL: @Ret dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: full-set{{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 2
ret i8* %x2
}
; External callee: the analysis records the call instead of resolving it.
declare void @Foo(i16* %p)
; Address is only passed to a known callee; recorded as @Foo(arg0, offset).
define void @DirectCall() {
; CHECK-LABEL: @DirectCall dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[8]: empty-set, @Foo(arg0, [2,3)){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i64, align 4
%x1 = bitcast i64* %x to i16*
%x2 = getelementptr i16, i16* %x1, i64 1
call void @Foo(i16* %x2);
ret void
}
; Indirect calls can not be analyzed (yet).
; FIXME: %p[]: full-set looks invalid
define void @IndirectCall(void (i8*)* %p) {
; CHECK-LABEL: @IndirectCall dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: p[]: full-set{{$}}
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: full-set{{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
call void %p(i8* %x1);
ret void
}
; Offset depends on %z, but the access still stays within the alloca.
define void @NonConstantOffset(i1 zeroext %z) {
; CHECK-LABEL: @NonConstantOffset dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: z[]: full-set{{$}}
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,4){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
%idx = select i1 %z, i64 1, i64 2
%x2 = getelementptr i8, i8* %x1, i64 %idx
store i8 0, i8* %x2, align 1
ret void
}
; Offset depends on %z and may reach past the alloca.
define void @NonConstantOffsetOOB(i1 zeroext %z) {
; CHECK-LABEL: @NonConstantOffsetOOB dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: z[]: full-set{{$}}
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,6){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
%idx = select i1 %z, i64 1, i64 4
%x2 = getelementptr i8, i8* %x1, i64 %idx
store i8 0, i8* %x2, align 1
ret void
}
; In-bounds access near the end of a 40-byte array alloca.
define void @ArrayAlloca() {
; CHECK-LABEL: @ArrayAlloca dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[40]: [36,40){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, i32 10, align 4
%x1 = bitcast i32* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 36
%x3 = bitcast i8* %x2 to i32*
store i32 0, i32* %x3, align 1
ret void
}
; Access straddling the end of the array alloca.
define void @ArrayAllocaOOB() {
; CHECK-LABEL: @ArrayAllocaOOB dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[40]: [37,41){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, i32 10, align 4
%x1 = bitcast i32* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 37
%x3 = bitcast i8* %x2 to i32*
store i32 0, i32* %x3, align 1
ret void
}
; Dynamic alloca whose address is never accessed (size reported as 0).
define void @DynamicAllocaUnused(i64 %size) {
; CHECK-LABEL: @DynamicAllocaUnused dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: size[]: empty-set{{$}}
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[0]: empty-set{{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, i64 %size, align 16
ret void
}
; Dynamic alloca with unknown size.
define void @DynamicAlloca(i64 %size) {
; CHECK-LABEL: @DynamicAlloca dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: size[]: [0,-12){{$}}
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[0]: [0,4){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, i64 %size, align 16
store i32 0, i32* %x, align 1
ret void
}
; Dynamic alloca with limited size.
; FIXME: could be proved safe. Implement.
define void @DynamicAllocaFiniteSizeRange(i1 zeroext %z) {
; CHECK-LABEL: @DynamicAllocaFiniteSizeRange dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: z[]: [0,-12){{$}}
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[0]: [0,4){{$}}
; CHECK-NOT: ]:
entry:
%size = select i1 %z, i64 3, i64 5
%x = alloca i32, i64 %size, align 16
store i32 0, i32* %x, align 1
ret void
}
; Pointer advanced across 10 iterations; every access stays in bounds.
define signext i8 @SimpleLoop() {
; CHECK-LABEL: @SimpleLoop dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[10]: [0,10){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca [10 x i8], align 1
%0 = getelementptr inbounds [10 x i8], [10 x i8]* %x, i64 0, i64 0
%lftr.limit = getelementptr inbounds [10 x i8], [10 x i8]* %x, i64 0, i64 10
br label %for.body
for.body:
%sum.010 = phi i8 [ 0, %entry ], [ %add, %for.body ]
%p.09 = phi i8* [ %0, %entry ], [ %incdec.ptr, %for.body ]
%incdec.ptr = getelementptr inbounds i8, i8* %p.09, i64 1
%1 = load volatile i8, i8* %p.09, align 1
%add = add i8 %1, %sum.010
%exitcond = icmp eq i8* %incdec.ptr, %lftr.limit
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup:
ret i8 %add
}
; OOB in a loop.
define signext i8 @SimpleLoopOOB() {
; CHECK-LABEL: @SimpleLoopOOB dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[10]: [0,11){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca [10 x i8], align 1
%0 = getelementptr inbounds [10 x i8], [10 x i8]* %x, i64 0, i64 0
; 11 iterations
%lftr.limit = getelementptr inbounds [10 x i8], [10 x i8]* %x, i64 0, i64 11
br label %for.body
for.body:
%sum.010 = phi i8 [ 0, %entry ], [ %add, %for.body ]
%p.09 = phi i8* [ %0, %entry ], [ %incdec.ptr, %for.body ]
%incdec.ptr = getelementptr inbounds i8, i8* %p.09, i64 1
%1 = load volatile i8, i8* %p.09, align 1
%add = add i8 %1, %sum.010
%exitcond = icmp eq i8* %incdec.ptr, %lftr.limit
br i1 %exitcond, label %for.cond.cleanup, label %for.body
for.cond.cleanup:
ret i8 %add
}
; FIXME: we don't understand that %sz in the memset call is limited to 128 by the preceding check.
define dso_local void @SizeCheck(i32 %sz) {
; CHECK-LABEL: @SizeCheck{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: sz[]: [0,1){{$}}
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x1[128]: full-set{{$}}
; CHECK-NOT: ]:
entry:
%x1 = alloca [128 x i8], align 16
%x1.sub = getelementptr inbounds [128 x i8], [128 x i8]* %x1, i64 0, i64 0
%cmp = icmp slt i32 %sz, 129
br i1 %cmp, label %if.then, label %if.end
if.then:
call void @llvm.memset.p0i8.i32(i8* nonnull align 16 %x1.sub, i8 0, i32 %sz, i1 false)
br label %if.end
if.end:
ret void
}

View File

@ -0,0 +1,200 @@
; RUN: opt -S -analyze -stack-safety-local < %s | FileCheck %s
; RUN: opt -S -passes="print<stack-safety-local>" -disable-output < %s 2>&1 | FileCheck %s
; Tests for memset/memcpy/memmove handling in the stack-safety local analysis.
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
declare void @llvm.memset.p0i8.i32(i8* %dest, i8 %val, i32 %len, i1 %isvolatile)
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 %len, i1 %isvolatile)
declare void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 %len, i1 %isvolatile)
; memset of exactly the alloca size.
define void @MemsetInBounds() {
; CHECK-LABEL: MemsetInBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,4){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
call void @llvm.memset.p0i8.i32(i8* %x1, i8 42, i32 4, i1 false)
ret void
}
; Volatile does not matter for access bounds.
define void @VolatileMemsetInBounds() {
; CHECK-LABEL: VolatileMemsetInBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,4){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
call void @llvm.memset.p0i8.i32(i8* %x1, i8 42, i32 4, i1 true)
ret void
}
; memset one byte past the end of the alloca.
define void @MemsetOutOfBounds() {
; CHECK-LABEL: MemsetOutOfBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,5){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
call void @llvm.memset.p0i8.i32(i8* %x1, i8 42, i32 5, i1 false)
ret void
}
; Non-constant length: the access range becomes unknown (full-set).
define void @MemsetNonConst(i32 %size) {
; CHECK-LABEL: MemsetNonConst dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: size[]: [0,1){{$}}
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: full-set{{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
call void @llvm.memset.p0i8.i32(i8* %x1, i8 42, i32 %size, i1 false)
ret void
}
; FIXME: memintrinsics should look at size range when possible
; Right now we refuse any non-constant size.
define void @MemsetNonConstInBounds(i1 zeroext %z) {
; CHECK-LABEL: MemsetNonConstInBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: z[]: [0,1){{$}}
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: full-set{{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
%size = select i1 %z, i32 3, i32 4
call void @llvm.memset.p0i8.i32(i8* %x1, i8 42, i32 %size, i1 false)
ret void
}
; memcpy fully inside both allocas.
define void @MemcpyInBounds() {
; CHECK-LABEL: MemcpyInBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,4){{$}}
; CHECK-NEXT: y[4]: [0,4){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%y = alloca i32, align 4
%x1 = bitcast i32* %x to i8*
%y1 = bitcast i32* %y to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %x1, i8* %y1, i32 4, i1 false)
ret void
}
; Length 5 overruns the 4-byte source %y.
define void @MemcpySrcOutOfBounds() {
; CHECK-LABEL: MemcpySrcOutOfBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[8]: [0,5){{$}}
; CHECK-NEXT: y[4]: [0,5){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i64, align 4
%y = alloca i32, align 4
%x1 = bitcast i64* %x to i8*
%y1 = bitcast i32* %y to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %x1, i8* %y1, i32 5, i1 false)
ret void
}
; Length 5 overruns the 4-byte destination %x.
define void @MemcpyDstOutOfBounds() {
; CHECK-LABEL: MemcpyDstOutOfBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,5){{$}}
; CHECK-NEXT: y[8]: [0,5){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%y = alloca i64, align 4
%x1 = bitcast i32* %x to i8*
%y1 = bitcast i64* %y to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %x1, i8* %y1, i32 5, i1 false)
ret void
}
; Length 9 overruns both allocas.
define void @MemcpyBothOutOfBounds() {
; CHECK-LABEL: MemcpyBothOutOfBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[4]: [0,9){{$}}
; CHECK-NEXT: y[8]: [0,9){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i32, align 4
%y = alloca i64, align 4
%x1 = bitcast i32* %x to i8*
%y1 = bitcast i64* %y to i8*
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %x1, i8* %y1, i32 9, i1 false)
ret void
}
; Self-copy: both the [0,3) dest and [5,8) src ranges stay within the alloca.
define void @MemcpySelfInBounds() {
; CHECK-LABEL: MemcpySelfInBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[8]: [0,8){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i64, align 4
%x1 = bitcast i64* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 5
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %x1, i8* %x2, i32 3, i1 false)
ret void
}
; Source range [5,9) runs one byte past the 8-byte alloca.
define void @MemcpySelfSrcOutOfBounds() {
; CHECK-LABEL: MemcpySelfSrcOutOfBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[8]: [0,9){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i64, align 4
%x1 = bitcast i64* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 5
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %x1, i8* %x2, i32 4, i1 false)
ret void
}
; Destination range [5,9) runs one byte past the 8-byte alloca.
define void @MemcpySelfDstOutOfBounds() {
; CHECK-LABEL: MemcpySelfDstOutOfBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[8]: [0,9){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i64, align 4
%x1 = bitcast i64* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 5
call void @llvm.memcpy.p0i8.p0i8.i32(i8* %x2, i8* %x1, i32 4, i1 false)
ret void
}
; Both the source and destination ranges overrun the alloca.
define void @MemmoveSelfBothOutOfBounds() {
; CHECK-LABEL: MemmoveSelfBothOutOfBounds dso_preemptable{{$}}
; CHECK-NEXT: args uses:
; CHECK-NEXT: allocas uses:
; CHECK-NEXT: x[8]: [0,14){{$}}
; CHECK-NOT: ]:
entry:
%x = alloca i64, align 4
%x1 = bitcast i64* %x to i8*
%x2 = getelementptr i8, i8* %x1, i64 5
call void @llvm.memmove.p0i8.p0i8.i32(i8* %x1, i8* %x2, i32 9, i1 false)
ret void
}