Fix StackSafetyAnalysis crash with scalable vector types.

Summary:
Treat scalable allocas as if they have a storage size of 0, and
scalable-typed memory accesses as if their range is unlimited.

This is not proper support for scalable vector types in the analysis -
we can do better, but not today.
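
A rough standalone sketch of the alloca-size side of the scheme (illustrative
only, not part of the patch; the helper name is made up). DataLayout's
getTypeAllocSize returns a TypeSize, and for a scalable vector the byte count
is only known as a multiple of vscale, so the analysis falls back to 0 instead
of asking for a fixed size:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/TypeSize.h"

// Hypothetical helper mirroring the patch's fallback: scalable types are
// treated as having a static allocation size of 0.
static uint64_t staticAllocSizeOrZero(const llvm::DataLayout &DL,
                                      llvm::Type *Ty) {
  llvm::TypeSize TS = DL.getTypeAllocSize(Ty);
  if (TS.isScalable())
    return 0; // size depends on vscale, unknown at compile time
  return TS.getFixedSize(); // a plain compile-time byte count
}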

Reviewers: vitalybuka

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73394
Evgenii Stepanov 2020-01-24 16:36:13 -08:00
parent 8e3f59b45a
commit c3b80adcee
2 changed files with 40 additions and 7 deletions

@@ -131,7 +131,10 @@ raw_ostream &operator<<(raw_ostream &OS, const ParamInfo &P) {
 /// size can not be statically determined.
 uint64_t getStaticAllocaAllocationSize(const AllocaInst *AI) {
   const DataLayout &DL = AI->getModule()->getDataLayout();
-  uint64_t Size = DL.getTypeAllocSize(AI->getAllocatedType());
+  TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
+  if (TS.isScalable())
+    return 0;
+  uint64_t Size = TS.getFixedSize();
   if (AI->isArrayAllocation()) {
     auto C = dyn_cast<ConstantInt>(AI->getArraySize());
     if (!C)
@@ -211,7 +214,9 @@ class StackSafetyLocalAnalysis {
   ConstantRange offsetFromAlloca(Value *Addr, const Value *AllocaPtr);
   ConstantRange getAccessRange(Value *Addr, const Value *AllocaPtr,
-                               uint64_t AccessSize);
+                               ConstantRange SizeRange);
+  ConstantRange getAccessRange(Value *Addr, const Value *AllocaPtr,
+                               TypeSize Size);
   ConstantRange getMemIntrinsicAccessRange(const MemIntrinsic *MI, const Use &U,
                                            const Value *AllocaPtr);
@@ -244,9 +249,9 @@ StackSafetyLocalAnalysis::offsetFromAlloca(Value *Addr,
   return Offset;
 }
 
-ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
-                                                       const Value *AllocaPtr,
-                                                       uint64_t AccessSize) {
+ConstantRange
+StackSafetyLocalAnalysis::getAccessRange(Value *Addr, const Value *AllocaPtr,
+                                         ConstantRange SizeRange) {
   if (!SE.isSCEVable(Addr->getType()))
     return UnknownRange;
@@ -255,12 +260,20 @@ ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
   ConstantRange AccessStartRange =
       SE.getUnsignedRange(Expr).zextOrTrunc(PointerSize);
-  ConstantRange SizeRange = getRange(0, AccessSize);
   ConstantRange AccessRange = AccessStartRange.add(SizeRange);
   assert(!AccessRange.isEmptySet());
   return AccessRange;
 }
 
+ConstantRange StackSafetyLocalAnalysis::getAccessRange(Value *Addr,
+                                                       const Value *AllocaPtr,
+                                                       TypeSize Size) {
+  ConstantRange SizeRange = Size.isScalable()
+                                ? ConstantRange::getFull(PointerSize)
+                                : getRange(0, Size.getFixedSize());
+  return getAccessRange(Addr, AllocaPtr, SizeRange);
+}
+
 ConstantRange StackSafetyLocalAnalysis::getMemIntrinsicAccessRange(
     const MemIntrinsic *MI, const Use &U, const Value *AllocaPtr) {
   if (auto MTI = dyn_cast<MemTransferInst>(MI)) {
@@ -274,7 +287,8 @@ ConstantRange StackSafetyLocalAnalysis::getMemIntrinsicAccessRange(
   // Non-constant size => unsafe. FIXME: try SCEV getRange.
   if (!Len)
     return UnknownRange;
-  ConstantRange AccessRange = getAccessRange(U, AllocaPtr, Len->getZExtValue());
+  ConstantRange AccessRange =
+      getAccessRange(U, AllocaPtr, getRange(0, Len->getZExtValue()));
   return AccessRange;
 }
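
A hedged, standalone sketch (not from the patch; the free-function form and
name are illustrative) of the size-to-range mapping the new TypeSize overload
above performs: a scalable size maps to the full pointer-width range, while a
fixed size N maps to the half-open interval [0, N), which matches the [0,1)
range the test below expects for a 1-byte store.

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/Support/TypeSize.h"

// Illustrative only: scalable access sizes could touch any offset, so they
// get the full range; fixed sizes get the byte offsets [0, N).
// (The degenerate N == 0 case is ignored in this sketch.)
llvm::ConstantRange sizeToRange(llvm::TypeSize Size, unsigned PointerSize) {
  if (Size.isScalable())
    return llvm::ConstantRange::getFull(PointerSize);
  return llvm::ConstantRange(llvm::APInt(PointerSize, 0),
                             llvm::APInt(PointerSize, Size.getFixedSize()));
}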

@@ -349,3 +349,22 @@ if.then:
 if.end:
   ret void
 }
+
+; FIXME: scalable allocas are considered to be of size zero, and scalable accesses to be full-range.
+; This effectively disables safety analysis for scalable allocations.
+define void @Scalable(<vscale x 4 x i32>* %p, <vscale x 4 x i32>* %unused, <vscale x 4 x i32> %v) {
+; CHECK-LABEL: @Scalable dso_preemptable{{$}}
+; CHECK-NEXT: args uses:
+; CHECK-NEXT: p[]: full-set
+; CHECK-NEXT: unused[]: empty-set
+; CHECK-NEXT: v[]: full-set
+; CHECK-NEXT: allocas uses:
+; CHECK-NEXT: x[0]: [0,1){{$}}
+; CHECK-NOT: ]:
+entry:
+  %x = alloca <vscale x 4 x i32>, align 4
+  %x1 = bitcast <vscale x 4 x i32>* %x to i8*
+  store i8 0, i8* %x1, align 1
+  store <vscale x 4 x i32> %v, <vscale x 4 x i32>* %p, align 4
+  ret void
+}