[MemDepAnalysis/VNCoercion] Move static method to its only use. [NFCI]
The static method MemoryDependenceResults::getLoadLoadClobberFullWidthSize neither stores nor uses any state specific to MemoryDependenceResults. Move it to its only user, VNCoercion.
commit 9f6c6ee6b9
parent 128e1ebd93
MemoryDependenceAnalysis.h:

@@ -468,18 +468,6 @@ public:
   /// with the same queried instruction.
   MemDepResult getInvariantGroupPointerDependency(LoadInst *LI, BasicBlock *BB);

-  /// Looks at a memory location for a load (specified by MemLocBase, Offs, and
-  /// Size) and compares it against a load.
-  ///
-  /// If the specified load could be safely widened to a larger integer load
-  /// that is 1) still efficient, 2) safe for the target, and 3) would provide
-  /// the specified memory location value, then this function returns the size
-  /// in bytes of the load width to use. If not, this returns zero.
-  static unsigned getLoadLoadClobberFullWidthSize(const Value *MemLocBase,
-                                                  int64_t MemLocOffs,
-                                                  unsigned MemLocSize,
-                                                  const LoadInst *LI);
-
   /// Release memory in caches.
   void releaseMemory();
MemoryDependenceAnalysis.cpp:

@@ -238,83 +238,6 @@ MemDepResult MemoryDependenceResults::getCallDependencyFrom(
   return MemDepResult::getNonFuncLocal();
 }

-unsigned MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
-    const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
-    const LoadInst *LI) {
-  // We can only extend simple integer loads.
-  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
-    return 0;
-
-  // Load widening is hostile to ThreadSanitizer: it may cause false positives
-  // or make the reports more cryptic (access sizes are wrong).
-  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
-    return 0;
-
-  const DataLayout &DL = LI->getModule()->getDataLayout();
-
-  // Get the base of this load.
-  int64_t LIOffs = 0;
-  const Value *LIBase =
-      GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);
-
-  // If the two pointers are not based on the same pointer, we can't tell that
-  // they are related.
-  if (LIBase != MemLocBase)
-    return 0;
-
-  // Okay, the two values are based on the same pointer, but returned as
-  // no-alias. This happens when we have things like two byte loads at "P+1"
-  // and "P+3". Check to see if increasing the size of the "LI" load up to its
-  // alignment (or the largest native integer type) will allow us to load all
-  // the bits required by MemLoc.
-
-  // If MemLoc is before LI, then no widening of LI will help us out.
-  if (MemLocOffs < LIOffs)
-    return 0;
-
-  // Get the alignment of the load in bytes. We assume that it is safe to load
-  // any legal integer up to this size without a problem. For example, if we're
-  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
-  // widen it up to an i32 load. If it is known 2-byte aligned, we can widen it
-  // to i16.
-  unsigned LoadAlign = LI->getAlignment();
-
-  int64_t MemLocEnd = MemLocOffs + MemLocSize;
-
-  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
-  if (LIOffs + LoadAlign < MemLocEnd)
-    return 0;
-
-  // This is the size of the load to try. Start with the next larger power of
-  // two.
-  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
-  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);
-
-  while (true) {
-    // If this load size is bigger than our known alignment or would not fit
-    // into a native integer register, then we fail.
-    if (NewLoadByteSize > LoadAlign ||
-        !DL.fitsInLegalInteger(NewLoadByteSize * 8))
-      return 0;
-
-    if (LIOffs + NewLoadByteSize > MemLocEnd &&
-        (LI->getParent()->getParent()->hasFnAttribute(
-             Attribute::SanitizeAddress) ||
-         LI->getParent()->getParent()->hasFnAttribute(
-             Attribute::SanitizeHWAddress)))
-      // We will be reading past the location accessed by the original program.
-      // While this is safe in a regular build, Address Safety analysis tools
-      // may start reporting false warnings. So, don't do widening.
-      return 0;
-
-    // If a load of this width would include all of MemLoc, then we succeed.
-    if (LIOffs + NewLoadByteSize >= MemLocEnd)
-      return NewLoadByteSize;
-
-    NewLoadByteSize <<= 1;
-  }
-}
-
 static bool isVolatile(Instruction *Inst) {
   if (auto *LI = dyn_cast<LoadInst>(Inst))
     return LI->isVolatile();
VNCoercion.cpp:

@@ -1,7 +1,6 @@
 #include "llvm/Transforms/Utils/VNCoercion.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/MemoryDependenceAnalysis.h"
 #include "llvm/Analysis/ValueTracking.h"
 #include "llvm/IR/IRBuilder.h"
 #include "llvm/IR/IntrinsicInst.h"
@@ -240,6 +239,91 @@ int analyzeLoadFromClobberingStore(Type *LoadTy, Value *LoadPtr,
       DL);
 }

+/// Looks at a memory location for a load (specified by MemLocBase, Offs, and
+/// Size) and compares it against a load.
+///
+/// If the specified load could be safely widened to a larger integer load
+/// that is 1) still efficient, 2) safe for the target, and 3) would provide
+/// the specified memory location value, then this function returns the size
+/// in bytes of the load width to use. If not, this returns zero.
+static unsigned getLoadLoadClobberFullWidthSize(const Value *MemLocBase,
+                                                int64_t MemLocOffs,
+                                                unsigned MemLocSize,
+                                                const LoadInst *LI) {
+  // We can only extend simple integer loads.
+  if (!isa<IntegerType>(LI->getType()) || !LI->isSimple())
+    return 0;
+
+  // Load widening is hostile to ThreadSanitizer: it may cause false positives
+  // or make the reports more cryptic (access sizes are wrong).
+  if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
+    return 0;
+
+  const DataLayout &DL = LI->getModule()->getDataLayout();
+
+  // Get the base of this load.
+  int64_t LIOffs = 0;
+  const Value *LIBase =
+      GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);
+
+  // If the two pointers are not based on the same pointer, we can't tell that
+  // they are related.
+  if (LIBase != MemLocBase)
+    return 0;
+
+  // Okay, the two values are based on the same pointer, but returned as
+  // no-alias. This happens when we have things like two byte loads at "P+1"
+  // and "P+3". Check to see if increasing the size of the "LI" load up to its
+  // alignment (or the largest native integer type) will allow us to load all
+  // the bits required by MemLoc.
+
+  // If MemLoc is before LI, then no widening of LI will help us out.
+  if (MemLocOffs < LIOffs)
+    return 0;
+
+  // Get the alignment of the load in bytes. We assume that it is safe to load
+  // any legal integer up to this size without a problem. For example, if we're
+  // looking at an i8 load on x86-32 that is known 1024 byte aligned, we can
+  // widen it up to an i32 load. If it is known 2-byte aligned, we can widen it
+  // to i16.
+  unsigned LoadAlign = LI->getAlignment();
+
+  int64_t MemLocEnd = MemLocOffs + MemLocSize;
+
+  // If no amount of rounding up will let MemLoc fit into LI, then bail out.
+  if (LIOffs + LoadAlign < MemLocEnd)
+    return 0;
+
+  // This is the size of the load to try. Start with the next larger power of
+  // two.
+  unsigned NewLoadByteSize = LI->getType()->getPrimitiveSizeInBits() / 8U;
+  NewLoadByteSize = NextPowerOf2(NewLoadByteSize);
+
+  while (true) {
+    // If this load size is bigger than our known alignment or would not fit
+    // into a native integer register, then we fail.
+    if (NewLoadByteSize > LoadAlign ||
+        !DL.fitsInLegalInteger(NewLoadByteSize * 8))
+      return 0;
+
+    if (LIOffs + NewLoadByteSize > MemLocEnd &&
+        (LI->getParent()->getParent()->hasFnAttribute(
+             Attribute::SanitizeAddress) ||
+         LI->getParent()->getParent()->hasFnAttribute(
+             Attribute::SanitizeHWAddress)))
+      // We will be reading past the location accessed by the original program.
+      // While this is safe in a regular build, Address Safety analysis tools
+      // may start reporting false warnings. So, don't do widening.
+      return 0;
+
+    // If a load of this width would include all of MemLoc, then we succeed.
+    if (LIOffs + NewLoadByteSize >= MemLocEnd)
+      return NewLoadByteSize;
+
+    NewLoadByteSize <<= 1;
+  }
+}
+
 /// This function is called when we have a
 /// memdep query of a load that ends up being clobbered by another load. See if
 /// the other load can feed into the second load.
@@ -267,8 +351,8 @@ int analyzeLoadFromClobberingLoad(Type *LoadTy, Value *LoadPtr, LoadInst *DepLI,
       GetPointerBaseWithConstantOffset(LoadPtr, LoadOffs, DL);
   unsigned LoadSize = DL.getTypeStoreSize(LoadTy);

-  unsigned Size = MemoryDependenceResults::getLoadLoadClobberFullWidthSize(
-      LoadBase, LoadOffs, LoadSize, DepLI);
+  unsigned Size =
+      getLoadLoadClobberFullWidthSize(LoadBase, LoadOffs, LoadSize, DepLI);
   if (Size == 0)
     return -1;
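To make the widening arithmetic concrete, here is a minimal standalone sketch of the size computation the moved function performs, assuming the IR-specific guards (simple integer load, sanitizer attributes) have already passed. widenedLoadSize, fitsInLegalInteger, and the numbers in main are illustrative stand-ins, not part of the commit; NextPowerOf2 mirrors llvm::NextPowerOf2, which returns the next power of two strictly greater than its argument.

#include <cstdint>
#include <cstdio>

// Stand-in for DL.fitsInLegalInteger: assume a 64-bit native register.
static bool fitsInLegalInteger(unsigned Bits) { return Bits <= 64; }

// Same bit trick as llvm::NextPowerOf2: set all bits below the highest
// set bit, then add one.
static uint64_t NextPowerOf2(uint64_t A) {
  A |= (A >> 1);
  A |= (A >> 2);
  A |= (A >> 4);
  A |= (A >> 8);
  A |= (A >> 16);
  A |= (A >> 32);
  return A + 1;
}

// The arithmetic core of getLoadLoadClobberFullWidthSize, on plain integers:
// can a LoadBytes-byte load at offset LIOffs (with alignment LoadAlign) be
// widened to cover [MemLocOffs, MemLocOffs + MemLocSize)?
unsigned widenedLoadSize(int64_t LIOffs, unsigned LoadBytes, unsigned LoadAlign,
                         int64_t MemLocOffs, unsigned MemLocSize) {
  if (MemLocOffs < LIOffs) // MemLoc is before the load: widening can't help.
    return 0;
  int64_t MemLocEnd = MemLocOffs + MemLocSize;
  if (LIOffs + LoadAlign < MemLocEnd) // No rounding up can reach MemLoc.
    return 0;
  unsigned NewLoadByteSize = (unsigned)NextPowerOf2(LoadBytes);
  while (true) {
    if (NewLoadByteSize > LoadAlign || !fitsInLegalInteger(NewLoadByteSize * 8))
      return 0;
    if (LIOffs + NewLoadByteSize >= MemLocEnd) // Wide load covers MemLoc.
      return NewLoadByteSize;
    NewLoadByteSize <<= 1; // Try the next power-of-two width.
  }
}

int main() {
  // Models the "two byte loads at P+1 and P+3" case from the comments:
  // an i8 load at offset 1 with 4-byte alignment, queried against the
  // byte at offset 3.
  std::printf("%u\n", widenedLoadSize(1, 1, 4, 3, 1)); // prints 4
}

In this example the candidate width starts at 2 bytes (the next power of two above 1), which only reaches offset 3, so it doubles to 4; a 4-byte load at offset 1 covers the byte at offset 3 and stays within the alignment and legal-integer limits, so 4 is returned.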