diff --git a/llvm/lib/Analysis/StackSafetyAnalysis.cpp b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
index 7f5bedabbd80..d85da765b7fe 100644
--- a/llvm/lib/Analysis/StackSafetyAnalysis.cpp
+++ b/llvm/lib/Analysis/StackSafetyAnalysis.cpp
@@ -131,7 +131,10 @@ raw_ostream &operator<<(raw_ostream &OS, const ParamInfo &P) {
 /// size can not be statically determined.
 uint64_t getStaticAllocaAllocationSize(const AllocaInst *AI) {
   const DataLayout &DL = AI->getModule()->getDataLayout();
-  uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());
+  TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
+  if (TS.isScalable())
+    return 0;
+  uint64_t Size = TS.getFixedSize();
   if (AI->isArrayAllocation()) {
     auto C = dyn_cast<ConstantInt>(AI->getArraySize());
     if (!C)
@@ -211,7 +214,9 @@ class StackSafetyLocalAnalysis {
 
   ConstantRange offsetFromAlloca(Value *Addr, const Value *AllocaPtr);
   ConstantRange getAccessRange(Value *Addr, const Value *AllocaPtr,
-                               uint64_t AccessSize);
+                               ConstantRange SizeRange);
+  ConstantRange getAccessRange(Value *Addr, const Value *AllocaPtr,
+                               TypeSize Size);
   ConstantRange getMemIntrinsicAccessRange(const MemIntrinsic *MI, const Use &U,
                                            const Value *AllocaPtr);
 
@@ -244,9 +249,9 @@ StackSafetyLocalAnalysis::offsetFromAlloca(Value *Addr,
   return Offset;
 }
 
-ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
-                                                       const Value *AllocaPtr,
-                                                       uint64_t AccessSize) {
+ConstantRange
+StackSafetyLocalAnalysis::getAccessRange(Value *Addr, const Value *AllocaPtr,
+                                         ConstantRange SizeRange) {
   if (!SE.isSCEVable(Addr->getType()))
     return UnknownRange;
 
@@ -255,12 +260,20 @@ ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
 
   ConstantRange AccessStartRange =
       SE.getUnsignedRange(Expr).zextOrTrunc(PointerSize);
-  ConstantRange SizeRange = getRange(0, AccessSize);
   ConstantRange AccessRange = AccessStartRange.add(SizeRange);
   assert(!AccessRange.isEmptySet());
   return AccessRange;
 }
 
+ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
+                                                       const Value *AllocaPtr,
+                                                       TypeSize Size) {
+  ConstantRange SizeRange = Size.isScalable()
+                                ? ConstantRange::getFull(PointerSize)
+                                : getRange(0, Size.getFixedSize());
+  return getAccessRange(Addr, AllocaPtr, SizeRange);
+}
+
 ConstantRange StackSafetyLocalAnalysis::getMemIntrinsicAccessRange(
     const MemIntrinsic *MI, const Use &U, const Value *AllocaPtr) {
   if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
@@ -274,7 +287,8 @@ ConstantRange StackSafetyLocalAnalysis::getMemIntrinsicAccessRange(
   // Non-constant size => unsafe. FIXME: try SCEV getRange.
   if (!Len)
     return UnknownRange;
-  ConstantRange AccessRange = getAccessRange(U, AllocaPtr, Len->getZExtValue());
+  ConstantRange AccessRange =
+      getAccessRange(U, AllocaPtr, getRange(0, Len->getZExtValue()));
   return AccessRange;
 }
 
diff --git a/llvm/test/Analysis/StackSafetyAnalysis/local.ll b/llvm/test/Analysis/StackSafetyAnalysis/local.ll
index 814b97d29638..b751297644c8 100644
--- a/llvm/test/Analysis/StackSafetyAnalysis/local.ll
+++ b/llvm/test/Analysis/StackSafetyAnalysis/local.ll
@@ -349,3 +349,22 @@ if.then:
 if.end:
   ret void
 }
+
+; FIXME: scalable allocas are considered to be of size zero, and scalable accesses to be full-range. This effectively disables safety analysis for scalable allocations.
+define void @Scalable(<vscale x 4 x i32>* %p, <vscale x 4 x i32>* %unused, <vscale x 4 x i32> %v) {
+; CHECK-LABEL: @Scalable dso_preemptable{{$}}
+; CHECK-NEXT: args uses:
+; CHECK-NEXT: p[]: full-set
+; CHECK-NEXT: unused[]: empty-set
+; CHECK-NEXT: v[]: full-set
+; CHECK-NEXT: allocas uses:
+; CHECK-NEXT: x[0]: [0,1){{$}}
+; CHECK-NOT: ]:
+entry:
+  %x = alloca <vscale x 4 x i32>, align 4
+  %x1 = bitcast <vscale x 4 x i32>* %x to i8*
+  store i8 0, i8* %x1, align 1
+  store <vscale x 4 x i32> %v, <vscale x 4 x i32>* %p, align 4
+  ret void
+}
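
Note (commentary, not part of the patch): the change hinges on DataLayout::getTypeAllocSize returning a TypeSize, which carries a "scalable" bit, rather than a plain integer. getFixedSize() is only valid when that bit is clear, so every caller must branch on isScalable() first. A minimal sketch of that guard pattern follows; fixedSizeOrZero is a hypothetical helper named here only for illustration, not something the patch adds.

  // Illustrative sketch only: mirrors the patch's guard in
  // getStaticAllocaAllocationSize, not a definitive LLVM API.
  #include "llvm/Support/TypeSize.h"
  #include <cstdint>

  // Hypothetical helper: a scalable size is a runtime multiple of vscale,
  // so no exact static byte count exists; return 0 to mean "unknown",
  // matching the patch's convention for allocas it cannot analyze.
  static uint64_t fixedSizeOrZero(llvm::TypeSize TS) {
    if (TS.isScalable())
      return 0;                // conservative: size not statically known
    return TS.getFixedSize(); // safe: exact byte count for fixed-size types
  }

The access-range side takes the mirror-image conservative choice: where the alloca size collapses to 0 ("assume nothing is safely allocated"), a scalable access widens to ConstantRange::getFull(PointerSize) ("assume everything is touched"), which is why the test expects full-set for %p and %v.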