forked from OSchip/llvm-project
commit 02a2bb2f54 (parent ccd4e5e016)
@@ -884,7 +884,7 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
         getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
     if (AR != MustAlias)
       IsMustAlias = false;
-    // Operand doesnt alias 'Object', continue looking for other aliases
+    // Operand doesn't alias 'Object', continue looking for other aliases
     if (AR == NoAlias)
       continue;
     // Operand aliases 'Object', but call doesn't modify it. Strengthen
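The loop this hunk touches walks the call's data operands and merges the per-operand alias results into one mod/ref answer. A minimal standalone sketch of that merging logic, with plain enums standing in for LLVM's AliasResult and ModRefInfo and a callback standing in for the real alias query (all names here are illustrative, not LLVM's API):

#include <functional>
#include <vector>

enum class AliasResult { NoAlias, MayAlias, MustAlias };
enum class ModRefInfo { NoModRef, ModRef };

// Fold per-operand alias results the way the hunk's comments describe.
ModRefInfo mergeOperandAliases(
    const std::vector<int> &Operands,
    const std::function<AliasResult(int)> &AliasWithObject) {
  bool IsMustAlias = true;
  ModRefInfo Result = ModRefInfo::NoModRef;
  for (int Op : Operands) {
    AliasResult AR = AliasWithObject(Op);
    if (AR != AliasResult::MustAlias)
      IsMustAlias = false;
    // Operand doesn't alias 'Object': keep looking at the other operands.
    if (AR == AliasResult::NoAlias)
      continue;
    // Operand aliases 'Object'; this sketch conservatively assumes the call
    // may both read and write it (the real code refines this per operand).
    Result = ModRefInfo::ModRef;
  }
  // The real code uses IsMustAlias to strengthen the final answer to a
  // must-alias mod/ref result; this sketch only computes the flag.
  (void)IsMustAlias;
  return Result;
}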
@@ -1019,7 +1019,7 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
   // heap state at the point the guard is issued needs to be consistent in case
   // the guard invokes the "deopt" continuation.
 
-  // NB! This function is *not* commutative, so we specical case two
+  // NB! This function is *not* commutative, so we special case two
   // possibilities for guard intrinsics.
 
   if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
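A guard may read memory but never writes it, so the answer for (guard, other call) differs from (other call, guard); that asymmetry is why both argument orders are special-cased. A rough standalone sketch of the two branches (simplified types; not the exact LLVM code):

enum class ModRefInfo { NoModRef, Ref, Mod };

struct CallDesc {
  bool IsGuard;      // experimental_guard intrinsic?
  bool MayWriteHeap; // may the call modify heap state?
};

// Simplified: how Call1 relates to the memory touched by Call2.
ModRefInfo getModRefInfoSketch(const CallDesc &Call1, const CallDesc &Call2) {
  // Call1 is a guard: it cannot write, but it reads heap state that Call2
  // might modify, so the strongest possible answer is Ref.
  if (Call1.IsGuard)
    return Call2.MayWriteHeap ? ModRefInfo::Ref : ModRefInfo::NoModRef;
  // Call2 is a guard: Call1 may write the heap state the guard observes,
  // so from the guard's point of view Call1 acts as a Mod.
  if (Call2.IsGuard)
    return Call1.MayWriteHeap ? ModRefInfo::Mod : ModRefInfo::NoModRef;
  return ModRefInfo::Mod; // not modeled here; the real code keeps analyzing
}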
@@ -596,7 +596,7 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
         }
 
         // All non-call instructions we use the primary predicates for whether
-        // thay read or write memory.
+        // they read or write memory.
         if (I.mayReadFromMemory())
          FI.addModRefInfo(ModRefInfo::Ref);
        if (I.mayWriteToMemory())
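For instructions that are not calls, the generic mayReadFromMemory/mayWriteToMemory predicates drive a simple mod/ref bitmask accumulated per function. A tiny standalone model of that accumulation (FunctionRecord is a stand-in for GlobalsAAResult's internal per-function info):

enum ModRefBits { MRI_NoModRef = 0, MRI_Ref = 1, MRI_Mod = 2 };

struct FunctionRecord {
  unsigned ModRef = MRI_NoModRef;
  void addModRefInfo(unsigned MR) { ModRef |= MR; } // mod/ref is a bitmask
};

struct InstrDesc { bool MayRead; bool MayWrite; };

void accumulate(FunctionRecord &FI, const InstrDesc &I) {
  if (I.MayRead)
    FI.addModRefInfo(MRI_Ref); // instruction may read memory
  if (I.MayWrite)
    FI.addModRefInfo(MRI_Mod); // instruction may write memory
}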
@@ -790,10 +790,10 @@ bool GlobalsAAResult::isNonEscapingGlobalNoAlias(const GlobalValue *GV,
     }
 
     // FIXME: It would be good to handle other obvious no-alias cases here, but
-    // it isn't clear how to do so reasonbly without building a small version
+    // it isn't clear how to do so reasonably without building a small version
     // of BasicAA into this code. We could recurse into AAResultBase::alias
     // here but that seems likely to go poorly as we're inside the
-    // implementation of such a query. Until then, just conservatievly retun
+    // implementation of such a query. Until then, just conservatively return
     // false.
     return false;
   } while (!Inputs.empty());
@@ -1009,7 +1009,7 @@ bool InductionDescriptor::isInductionPHI(PHINode *Phi, const Loop *TheLoop,
   // If we started from an UnknownSCEV, and managed to build an addRecurrence
   // only after enabling Assume with PSCEV, this means we may have encountered
   // cast instructions that required adding a runtime check in order to
-  // guarantee the correctness of the AddRecurence respresentation of the
+  // guarantee the correctness of the AddRecurrence respresentation of the
   // induction.
   if (PhiScev != AR && SymbolicPhi) {
     SmallVector<Instruction *, 2> Casts;
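When the AddRec is only buildable under PSCEV's Assume predicate, the casts covered by that predicate have to be collected so later phases can treat them as redundant. The continuation plausibly looks like the sketch below; the helper name getCastsForInductionPHI is reconstructed from memory and should be checked against the actual source:

  if (PhiScev != AR && SymbolicPhi) {
    SmallVector<Instruction *, 2> Casts;
    // Gather the cast instructions whose correctness the runtime predicate
    // guarantees, so the induction descriptor can record them.
    if (getCastsForInductionPHI(PSE, SymbolicPhi, AR, Casts))
      return isInductionPHI(Phi, TheLoop, PSE.getSE(), D, AR, &Casts);
  }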
@@ -76,7 +76,7 @@ static cl::opt<int> LocallyHotCallSiteThreshold(
 
 static cl::opt<int> ColdCallSiteRelFreq(
     "cold-callsite-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
-    cl::desc("Maxmimum block frequency, expressed as a percentage of caller's "
+    cl::desc("Maximum block frequency, expressed as a percentage of caller's "
             "entry frequency, for a callsite to be cold in the absence of "
             "profile information."));
 
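Per the description, the flag is a percentage of the caller's entry frequency, defaulting to 2. A standalone sketch of the comparison it implies (the real check works on BlockFrequency values inside the inline-cost analysis; this integer version is only illustrative):

#include <cstdint>

static int ColdCallSiteRelFreq = 2; // mirrors cl::init(2)

// A callsite is cold when its block frequency is at most
// ColdCallSiteRelFreq percent of the caller's entry frequency.
bool isColdCallSiteSketch(std::uint64_t CallSiteFreq,
                          std::uint64_t CallerEntryFreq) {
  // Stay in integer arithmetic by scaling instead of dividing; assumes the
  // products do not overflow 64 bits.
  return CallSiteFreq * 100 <=
         CallerEntryFreq * static_cast<std::uint64_t>(ColdCallSiteRelFreq);
}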
@@ -1675,7 +1675,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
 /// blocks to see if all their incoming edges are dead or not.
 void CallAnalyzer::findDeadBlocks(BasicBlock *CurrBB, BasicBlock *NextBB) {
   auto IsEdgeDead = [&](BasicBlock *Pred, BasicBlock *Succ) {
-    // A CFG edge is dead if the predecessor is dead or the predessor has a
+    // A CFG edge is dead if the predecessor is dead or the predecessor has a
     // known successor which is not the one under exam.
     return (DeadBlocks.count(Pred) ||
             (KnownSuccessors[Pred] && KnownSuccessors[Pred] != Succ));
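IsEdgeDead feeds a worklist walk: once every incoming edge of a successor is dead, that successor is marked dead and its own successors are reexamined. A standalone model of the propagation, using std containers in place of LLVM's BasicBlock*/DenseMap machinery:

#include <map>
#include <set>
#include <vector>

using Block = int;
using EdgeMap = std::map<Block, std::vector<Block>>;

std::set<Block> DeadBlocks;
std::map<Block, Block> KnownSuccessors; // set only when one successor is known taken

// A CFG edge is dead if the predecessor is dead or the predecessor has a
// known successor which is not the one under exam.
bool isEdgeDead(Block Pred, Block Succ) {
  return DeadBlocks.count(Pred) ||
         (KnownSuccessors.count(Pred) && KnownSuccessors[Pred] != Succ);
}

// Mark Root dead, then propagate: a block dies once all incoming edges die.
void findDeadBlocksModel(Block Root, const EdgeMap &Preds,
                         const EdgeMap &Succs) {
  std::vector<Block> Worklist{Root};
  while (!Worklist.empty()) {
    Block Dead = Worklist.back();
    Worklist.pop_back();
    if (!DeadBlocks.insert(Dead).second)
      continue; // already known dead
    auto It = Succs.find(Dead);
    if (It == Succs.end())
      continue;
    for (Block Succ : It->second) {
      bool AllDead = true;
      for (Block Pred : Preds.at(Succ))
        AllDead = AllDead && isEdgeDead(Pred, Succ);
      if (AllDead)
        Worklist.push_back(Succ);
    }
  }
}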
@@ -624,7 +624,7 @@ bool LazyValueInfoImpl::solveBlockValueImpl(ValueLatticeElement &Res,
   // and the like to prove non-nullness, but it's not clear that's worth it
   // compile time wise. The context-insensitive value walk done inside
   // isKnownNonZero gets most of the profitable cases at much less expense.
-  // This does mean that we have a sensativity to where the defining
+  // This does mean that we have a sensitivity to where the defining
   // instruction is placed, even if it could legally be hoisted much higher.
   // That is unfortunate.
   PointerType *PT = dyn_cast<PointerType>(BBI->getType());
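The cheap check the comment argues for is what the quoted line begins: if the defining instruction produces a pointer that a context-insensitive walk proves non-zero, the lattice value is pinned to "not null". A reconstruction of the lines that plausibly follow, reusing the surrounding function's BBI, Res, and DL (approximate, not a verbatim quote):

  PointerType *PT = dyn_cast<PointerType>(BBI->getType());
  if (PT && isKnownNonZero(BBI, DL)) {
    // The value cannot be the null pointer of this type.
    Res = ValueLatticeElement::getNot(ConstantPointerNull::get(PT));
    return true;
  }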
@@ -2251,7 +2251,7 @@ void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
 
   // Match the types so we can compare the stride and the BETakenCount.
   // The Stride can be positive/negative, so we sign extend Stride;
-  // The backdgeTakenCount is non-negative, so we zero extend BETakenCount.
+  // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
   const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
   uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
   uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
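The extension step the comment describes then proceeds roughly as below; getNoopOrSignExtend, getZeroExtendExpr, and getMinusSCEV are real ScalarEvolution APIs, but the exact lines are reconstructed rather than quoted, reusing StrideExpr, BETakenCount, and the size variables from the hunk:

  ScalarEvolution *SE = PSE->getSE();
  const SCEV *CastedStride = StrideExpr;
  const SCEV *CastedBECount = BETakenCount;
  // Widen whichever side is narrower: the stride may be negative, so it is
  // sign extended; the backedge-taken count is non-negative, so it is zero
  // extended.
  if (BETypeSize >= StrideTypeSize)
    CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
  else
    CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
  const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);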
@@ -598,7 +598,7 @@ void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
 
   if (!RevDeleteUpdates.empty()) {
     // Update for inserted edges: use newDT and snapshot CFG as if deletes had
-    // not occured.
+    // not occurred.
     // FIXME: This creates a new DT, so it's more expensive to do mix
     // delete/inserts vs just inserts. We can do an incremental update on the DT
     // to revert deletes, than re-delete the edges. Teaching DT to do this, is
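The FIXME refers to the snapshot trick used just below this hunk: build a dominator tree over the CFG as it would look with the deletions undone, then run the insert updates against that snapshot. A reconstructed sketch (RevDeleteUpdates holds the inverses of the delete updates; the DominatorTree-with-updates constructor and GraphDiff are real LLVM facilities, but these lines are from memory):

    // See the CFG as if no edge had been deleted yet, then apply inserts.
    DominatorTree NewDT(DT, RevDeleteUpdates);
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, NewDT, &GD);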
@@ -696,7 +696,7 @@ void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
 
   // Map a BB to its predecessors: added + previously existing. To get a
   // deterministic order, store predecessors as SetVectors. The order in each
-  // will be defined by teh order in Updates (fixed) and the order given by
+  // will be defined by the order in Updates (fixed) and the order given by
   // children<> (also fixed). Since we further iterate over these ordered sets,
   // we lose the information of multiple edges possibly existing between two
   // blocks, so we'll keep and EdgeCount map for that.
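Concretely, the structures this comment motivates look something like the sketch below, using LLVM's real container types (the exact names in the function may differ): an insertion-ordered predecessor set per block, plus a separate count for parallel edges that the ordered sets alone cannot represent.

  SmallDenseMap<BasicBlock *, SmallSetVector<BasicBlock *, 8>> PredMap;
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;

  for (const auto &Edge : Updates) {
    BasicBlock *Pred = Edge.getFrom();
    BasicBlock *BB = Edge.getTo();
    PredMap[BB].insert(Pred);    // insertion order follows Updates
    ++EdgeCountMap[{Pred, BB}];  // multi-edges are remembered here
  }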
@@ -991,7 +991,7 @@ void InterleavedAccessInfo::analyzeInterleaving(
     // that all the pointers in the group don't wrap.
     // So we check only group member 0 (which is always guaranteed to exist),
     // and group member Factor - 1; If the latter doesn't exist we rely on
-    // peeling (if it is a non-reveresed accsess -- see Case 3).
+    // peeling (if it is a non-reversed accsess -- see Case 3).
     Value *FirstMemberPtr = getLoadStorePointerOperand(Group->getMember(0));
     if (!getPtrStride(PSE, FirstMemberPtr, TheLoop, Strides, /*Assume=*/false,
                       /*ShouldCheckWrap=*/true)) {
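The second half of the check the comment describes (group member Factor - 1, with peeling as the fallback) plausibly continues as sketched below, reusing the same helpers as the quoted lines; releaseGroup and RequiresScalarEpilogue are reconstructed from memory rather than quoted:

    if (Instruction *LastMember = Group->getMember(Group->getFactor() - 1)) {
      Value *LastMemberPtr = getLoadStorePointerOperand(LastMember);
      // If the last member's pointer may wrap, invalidate the whole group.
      if (!getPtrStride(PSE, LastMemberPtr, TheLoop, Strides, /*Assume=*/false,
                        /*ShouldCheckWrap=*/true))
        releaseGroup(Group);
    } else {
      // Case 3: no last member; rely on peeling the last iteration by
      // requiring a scalar epilogue.
      RequiresScalarEpilogue = true;
    }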