[analyzer] Cache offset computation for MemRegion

Achieves almost a 200% speedup on the example where the performance of
visitors was problematic.

Performance on sqlite3 is unaffected.

rdar://38818362

Differential Revision: https://reviews.llvm.org/D45113

llvm-svn: 328911
George Karpenkov 2018-03-31 01:20:07 +00:00
parent 137ca91f52
commit fa4d18c7e3
2 changed files with 37 additions and 31 deletions
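The change itself is mechanical: the offset walk moves into a static helper, and MemRegion::getAsOffset() memoizes its result in a mutable Optional<RegionOffset> member, so repeated queries against the same region do the walk only once. A minimal standalone sketch of that caching pattern (simplified, made-up types and names; not the analyzer's real classes):

#include "llvm/ADT/Optional.h"
#include <cstdint>

// Hypothetical stand-ins for RegionOffset / MemRegion, just to show the shape.
struct Offset { int64_t Bits = 0; };

class Region {
  // 'mutable' lets the const getter fill the cache in on first use.
  mutable llvm::Optional<Offset> CachedOffset;

  // Stand-in for the expensive walk over super-regions.
  Offset compute() const { return Offset{}; }

public:
  Offset getAsOffset() const {
    if (!CachedOffset)          // first call: do the work once
      CachedOffset = compute();
    return *CachedOffset;       // later calls: return the memoized value
  }
};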

MemRegion.h

@@ -86,6 +86,7 @@ public:
 private:
   const Kind kind;
+  mutable Optional<RegionOffset> cachedOffset;
 protected:
   MemRegion(Kind k) : kind(k) {}

MemRegion.cpp

@@ -1259,47 +1259,46 @@ static bool isImmediateBase(const CXXRecordDecl *Child,
   return false;
 }
-RegionOffset MemRegion::getAsOffset() const {
-  const MemRegion *R = this;
+static RegionOffset calculateOffset(const MemRegion *R) {
   const MemRegion *SymbolicOffsetBase = nullptr;
   int64_t Offset = 0;
   while (1) {
     switch (R->getKind()) {
-    case CodeSpaceRegionKind:
-    case StackLocalsSpaceRegionKind:
-    case StackArgumentsSpaceRegionKind:
-    case HeapSpaceRegionKind:
-    case UnknownSpaceRegionKind:
-    case StaticGlobalSpaceRegionKind:
-    case GlobalInternalSpaceRegionKind:
-    case GlobalSystemSpaceRegionKind:
-    case GlobalImmutableSpaceRegionKind:
+    case MemRegion::CodeSpaceRegionKind:
+    case MemRegion::StackLocalsSpaceRegionKind:
+    case MemRegion::StackArgumentsSpaceRegionKind:
+    case MemRegion::HeapSpaceRegionKind:
+    case MemRegion::UnknownSpaceRegionKind:
+    case MemRegion::StaticGlobalSpaceRegionKind:
+    case MemRegion::GlobalInternalSpaceRegionKind:
+    case MemRegion::GlobalSystemSpaceRegionKind:
+    case MemRegion::GlobalImmutableSpaceRegionKind:
       // Stores can bind directly to a region space to set a default value.
       assert(Offset == 0 && !SymbolicOffsetBase);
       goto Finish;
-    case FunctionCodeRegionKind:
-    case BlockCodeRegionKind:
-    case BlockDataRegionKind:
+    case MemRegion::FunctionCodeRegionKind:
+    case MemRegion::BlockCodeRegionKind:
+    case MemRegion::BlockDataRegionKind:
       // These will never have bindings, but may end up having values requested
       // if the user does some strange casting.
       if (Offset != 0)
         SymbolicOffsetBase = R;
       goto Finish;
-    case SymbolicRegionKind:
-    case AllocaRegionKind:
-    case CompoundLiteralRegionKind:
-    case CXXThisRegionKind:
-    case StringRegionKind:
-    case ObjCStringRegionKind:
-    case VarRegionKind:
-    case CXXTempObjectRegionKind:
+    case MemRegion::SymbolicRegionKind:
+    case MemRegion::AllocaRegionKind:
+    case MemRegion::CompoundLiteralRegionKind:
+    case MemRegion::CXXThisRegionKind:
+    case MemRegion::StringRegionKind:
+    case MemRegion::ObjCStringRegionKind:
+    case MemRegion::VarRegionKind:
+    case MemRegion::CXXTempObjectRegionKind:
       // Usual base regions.
       goto Finish;
-    case ObjCIvarRegionKind:
+    case MemRegion::ObjCIvarRegionKind:
      // This is a little strange, but it's a compromise between
      // ObjCIvarRegions having unknown compile-time offsets (when using the
      // non-fragile runtime) and yet still being distinct, non-overlapping
@@ -1307,14 +1306,14 @@ RegionOffset MemRegion::getAsOffset() const {
      // of computing offsets.
      goto Finish;
-    case CXXBaseObjectRegionKind: {
+    case MemRegion::CXXBaseObjectRegionKind: {
      const CXXBaseObjectRegion *BOR = cast<CXXBaseObjectRegion>(R);
      R = BOR->getSuperRegion();
      QualType Ty;
      bool RootIsSymbolic = false;
      if (const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(R)) {
-        Ty = TVR->getDesugaredValueType(getContext());
+        Ty = TVR->getDesugaredValueType(R->getContext());
      } else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
        // If our base region is symbolic, we don't know what type it really is.
        // Pretend the type of the symbol is the true dynamic type.
@@ -1348,17 +1347,17 @@ RegionOffset MemRegion::getAsOffset() const {
        continue;
      CharUnits BaseOffset;
-      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Child);
+      const ASTRecordLayout &Layout = R->getContext().getASTRecordLayout(Child);
      if (BOR->isVirtual())
        BaseOffset = Layout.getVBaseClassOffset(BOR->getDecl());
      else
        BaseOffset = Layout.getBaseClassOffset(BOR->getDecl());
      // The base offset is in chars, not in bits.
-      Offset += BaseOffset.getQuantity() * getContext().getCharWidth();
+      Offset += BaseOffset.getQuantity() * R->getContext().getCharWidth();
      break;
    }
-    case ElementRegionKind: {
+    case MemRegion::ElementRegionKind: {
      const ElementRegion *ER = cast<ElementRegion>(R);
      R = ER->getSuperRegion();
@@ -1379,14 +1378,14 @@ RegionOffset MemRegion::getAsOffset() const {
        int64_t i = CI->getValue().getSExtValue();
        // This type size is in bits.
-        Offset += i * getContext().getTypeSize(EleTy);
+        Offset += i * R->getContext().getTypeSize(EleTy);
      } else {
        // We cannot compute offset for non-concrete index.
        SymbolicOffsetBase = R;
      }
      break;
    }
-    case FieldRegionKind: {
+    case MemRegion::FieldRegionKind: {
      const FieldRegion *FR = cast<FieldRegion>(R);
      R = FR->getSuperRegion();
@@ -1412,7 +1411,7 @@ RegionOffset MemRegion::getAsOffset() const {
        if (FR->getDecl() == *FI)
          break;
      }
-      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+      const ASTRecordLayout &Layout = R->getContext().getASTRecordLayout(RD);
      // This is offset in bits.
      Offset += Layout.getFieldOffset(idx);
      break;
@@ -1426,6 +1425,12 @@ RegionOffset MemRegion::getAsOffset() const {
   return RegionOffset(R, Offset);
 }
+RegionOffset MemRegion::getAsOffset() const {
+  if (!cachedOffset)
+    cachedOffset = calculateOffset(this);
+  return *cachedOffset;
+}
 //===----------------------------------------------------------------------===//
 // BlockDataRegion
 //===----------------------------------------------------------------------===//
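One design note, as I read the patch (not stated explicitly in it): a MemRegion is immutable once constructed, so its offset can never change and caching it is safe. The member is mutable precisely so the const getAsOffset() can populate it lazily, and each region then pays for the super-region walk at most once no matter how many times the visitors ask for its offset.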