Modify ModRefInfo values using static inline method abstractions [NFC].

Summary:
The aim is to make ModRefInfo checks and updates more intuitive
and less error-prone by using inline methods that abstract the bit operations.

Ideally ModRefInfo would become an enum class, but that change will require
a wider set of changes to FunctionModRefBehavior.
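
For illustration, a minimal self-contained sketch of the before/after pattern
(the enum and the two helpers are reproduced from this patch; everything else
is illustrative):

#include <cassert>

enum ModRefInfo {
  MRI_NoModRef = 0,
  MRI_Ref = 1,
  MRI_Mod = 2,
  MRI_ModRef = MRI_Ref | MRI_Mod,
};
inline bool isModSet(const ModRefInfo MRI) { return MRI & MRI_Mod; }
inline ModRefInfo clearMod(const ModRefInfo MRI) {
  return ModRefInfo(MRI & MRI_Ref);
}

int main() {
  ModRefInfo Result = MRI_ModRef;
  // Before: if (Result & MRI_Mod) Result = ModRefInfo(Result & ~MRI_Mod);
  // After: the same bit operations, behind intent-revealing names.
  if (isModSet(Result))
    Result = clearMod(Result);
  assert(Result == MRI_Ref);
  return 0;
}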

Reviewers: sanjoy, george.burgess.iv, dberlin, hfinkel

Subscribers: nlopes, llvm-commits

Differential Revision: https://reviews.llvm.org/D40749

llvm-svn: 319821
Alina Sbirlea 2017-12-05 20:12:23 +00:00
parent f6ae323ddf
commit 63d2250a42
16 changed files with 171 additions and 125 deletions

View File

@ -95,19 +95,60 @@ enum AliasResult {
///
/// This is no access at all, a modification, a reference, or both
/// a modification and a reference. These are specifically structured such that
-/// they form a two bit matrix and bit-tests for 'mod' or 'ref' work with any
-/// of the possible values.
+/// they form a two bit matrix and bit-tests for 'mod' or 'ref'
+/// work with any of the possible values.
enum ModRefInfo {
/// The access neither references nor modifies the value stored in memory.
MRI_NoModRef = 0,
-/// The access references the value stored in memory.
+/// The access may reference the value stored in memory.
MRI_Ref = 1,
-/// The access modifies the value stored in memory.
+/// The access may modify the value stored in memory.
MRI_Mod = 2,
-/// The access both references and modifies the value stored in memory.
-MRI_ModRef = MRI_Ref | MRI_Mod
+/// The access may reference and may modify the value stored in memory.
+MRI_ModRef = MRI_Ref | MRI_Mod,
};
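
The enumerators above form the two bit matrix the comment describes: bit 0 is
Ref, bit 1 is Mod. A compile-time sketch of that layout (assuming only the
enum as defined above):

//                Mod bit  Ref bit
// MRI_NoModRef      0        0
// MRI_Ref           0        1
// MRI_Mod           1        0
// MRI_ModRef        1        1
static_assert((MRI_Ref | MRI_Mod) == MRI_ModRef, "two bit matrix");
static_assert((MRI_ModRef & MRI_Mod) && (MRI_ModRef & MRI_Ref),
              "bit-tests for 'mod' and 'ref' work on any value containing them");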
+LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) {
+  return MRI == MRI_NoModRef;
+}
+LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) {
+  return MRI & MRI_ModRef;
+}
+LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) {
+  return (MRI & MRI_ModRef) == MRI_ModRef;
+}
+LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) {
+  return MRI & MRI_Mod;
+}
+LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) {
+  return MRI & MRI_Ref;
+}
+LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
+  return ModRefInfo(MRI | MRI_Ref);
+}
+LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) {
+  return ModRefInfo(MRI | MRI_Mod);
+}
+LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
+  return ModRefInfo(MRI | MRI_ModRef);
+}
+LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
+  return ModRefInfo(MRI & MRI_Ref);
+}
+LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) {
+  return ModRefInfo(MRI & MRI_Mod);
+}
+LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1,
+                                             const ModRefInfo MRI2) {
+  return ModRefInfo(MRI1 | MRI2);
+}
+LLVM_NODISCARD inline ModRefInfo intersectModRef(const ModRefInfo MRI1,
+                                                 const ModRefInfo MRI2) {
+  return ModRefInfo(MRI1 & MRI2);
+}
/// The locations at which a function might access memory.
///
/// These are primarily used in conjunction with the \c AccessKind bits to
@ -187,6 +228,15 @@ enum FunctionModRefBehavior {
FMRB_UnknownModRefBehavior = FMRL_Anywhere | MRI_ModRef
};
+// Wrapper method strips bits significant only in FunctionModRefBehavior,
+// to obtain a valid ModRefInfo. The benefit of using the wrapper is that if
+// the ModRefInfo enum changes, the wrapper can be updated to & with the new
+// enum entry with all bits set to 1.
+LLVM_NODISCARD inline ModRefInfo
+createModRefInfo(const FunctionModRefBehavior FMRB) {
+  return ModRefInfo(FMRB & MRI_ModRef);
+}
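
The mask matters because FunctionModRefBehavior mixes FMRL_* location bits with
the two ModRefInfo bits (e.g. FMRB_UnknownModRefBehavior = FMRL_Anywhere |
MRI_ModRef below), so a plain cast would drag location bits along. A sketch of
the intended use, assuming a call site CS as in the code below:

FunctionModRefBehavior MRB = getModRefBehavior(CS); // may carry FMRL_* bits
ModRefInfo MRI = createModRefInfo(MRB);             // only MRI_* bits remain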
class AAResults {
public:
// Make these results default constructable and movable. We have to spell
@ -520,14 +570,7 @@ public:
const Optional<MemoryLocation> &OptLoc) {
if (OptLoc == None) {
if (auto CS = ImmutableCallSite(I)) {
-auto MRB = getModRefBehavior(CS);
-if ((MRB & MRI_ModRef) == MRI_ModRef)
-return MRI_ModRef;
-if (MRB & MRI_Ref)
-return MRI_Ref;
-if (MRB & MRI_Mod)
-return MRI_Mod;
-return MRI_NoModRef;
+return createModRefInfo(getModRefBehavior(CS));
}
}
@ -570,7 +613,7 @@ public:
/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
-/// in a BasicBlock. A ordered basic block \p OBB can be used to speed up
+/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction ordering queries inside the BasicBlock containing \p I.
ModRefInfo callCapturesBefore(const Instruction *I,
const MemoryLocation &MemLoc, DominatorTree *DT,

View File

@ -122,10 +122,10 @@ ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
ModRefInfo Result = MRI_ModRef;
for (const auto &AA : AAs) {
-Result = ModRefInfo(Result & AA->getArgModRefInfo(CS, ArgIdx));
+Result = intersectModRef(Result, AA->getArgModRefInfo(CS, ArgIdx));
// Early-exit the moment we reach the bottom of the lattice.
-if (Result == MRI_NoModRef)
+if (isNoModRef(Result))
return Result;
}
@ -146,8 +146,9 @@ ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
// is that if the call references what this instruction
// defines, it must be clobbered by this location.
const MemoryLocation DefLoc = MemoryLocation::get(I);
-if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
-return MRI_ModRef;
+ModRefInfo MR = getModRefInfo(Call, DefLoc);
+if (isModOrRefSet(MR))
+return setModAndRef(MR);
}
return MRI_NoModRef;
}
@ -157,10 +158,10 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
ModRefInfo Result = MRI_ModRef;
for (const auto &AA : AAs) {
-Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc));
+Result = intersectModRef(Result, AA->getModRefInfo(CS, Loc));
// Early-exit the moment we reach the bottom of the lattice.
-if (Result == MRI_NoModRef)
+if (isNoModRef(Result))
return Result;
}
@ -172,9 +173,9 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
return MRI_NoModRef;
if (onlyReadsMemory(MRB))
-Result = ModRefInfo(Result & MRI_Ref);
+Result = clearMod(Result);
else if (doesNotReadMemory(MRB))
-Result = ModRefInfo(Result & MRI_Mod);
+Result = clearRef(Result);
if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
bool DoesAlias = false;
@ -190,20 +191,21 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
if (ArgAlias != NoAlias) {
ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
DoesAlias = true;
-AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
+AllArgsMask = unionModRef(AllArgsMask, ArgMask);
}
}
}
// Return MRI_NoModRef if no alias found with any argument.
if (!DoesAlias)
return MRI_NoModRef;
-Result = ModRefInfo(Result & AllArgsMask);
+// Logical & between other AA analyses and argument analysis.
+Result = intersectModRef(Result, AllArgsMask);
}
// If Loc is a constant memory location, the call definitely could not
// modify the memory location.
-if ((Result & MRI_Mod) &&
-pointsToConstantMemory(Loc, /*OrLocal*/ false))
-Result = ModRefInfo(Result & ~MRI_Mod);
+if (isModSet(Result) && pointsToConstantMemory(Loc, /*OrLocal*/ false))
+Result = clearMod(Result);
return Result;
}
@ -213,10 +215,10 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
ModRefInfo Result = MRI_ModRef;
for (const auto &AA : AAs) {
-Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2));
+Result = intersectModRef(Result, AA->getModRefInfo(CS1, CS2));
// Early-exit the moment we reach the bottom of the lattice.
-if (Result == MRI_NoModRef)
+if (isNoModRef(Result))
return Result;
}
@ -239,9 +241,9 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
// If CS1 only reads memory, the only dependence on CS2 can be
// from CS1 reading memory written by CS2.
if (onlyReadsMemory(CS1B))
-Result = ModRefInfo(Result & MRI_Ref);
+Result = clearMod(Result);
else if (doesNotReadMemory(CS1B))
-Result = ModRefInfo(Result & MRI_Mod);
+Result = clearRef(Result);
// If CS2 only access memory through arguments, accumulate the mod/ref
// information from CS1's references to the memory referenced by
@ -256,17 +258,23 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);
-// ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence
-// of CS1 on that location is the inverse.
-ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
-if (ArgMask == MRI_Mod)
+// ArgModRefCS2 indicates what CS2 might do to CS2ArgLoc, and the
+// dependence of CS1 on that location is the inverse:
+// - If CS2 modifies location, dependence exists if CS1 reads or writes.
+// - If CS2 only reads location, dependence exists if CS1 writes.
+ModRefInfo ArgModRefCS2 = getArgModRefInfo(CS2, CS2ArgIdx);
+ModRefInfo ArgMask;
+if (isModSet(ArgModRefCS2))
ArgMask = MRI_ModRef;
-else if (ArgMask == MRI_Ref)
+else if (isRefSet(ArgModRefCS2))
ArgMask = MRI_Mod;
-ArgMask = ModRefInfo(ArgMask & getModRefInfo(CS1, CS2ArgLoc));
+// ModRefCS1 indicates what CS1 might do to CS2ArgLoc, and we use
+// above ArgMask to update dependence info.
+ModRefInfo ModRefCS1 = getModRefInfo(CS1, CS2ArgLoc);
+ArgMask = intersectModRef(ArgMask, ModRefCS1);
-R = ModRefInfo((R | ArgMask) & Result);
+R = intersectModRef(unionModRef(R, ArgMask), Result);
if (R == Result)
break;
}
@ -286,16 +294,14 @@ ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);
-// ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
-// CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
-// might Ref, then we care only about a Mod by CS2.
-ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
-ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc);
-if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
-(ArgR & MRI_ModRef) != MRI_NoModRef) ||
-((ArgMask & MRI_Ref) != MRI_NoModRef &&
-(ArgR & MRI_Mod) != MRI_NoModRef))
-R = ModRefInfo((R | ArgMask) & Result);
+// ArgModRefCS1 indicates what CS1 might do to CS1ArgLoc; if CS1 might
+// Mod CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If
+// CS1 might Ref, then we care only about a Mod by CS2.
+ModRefInfo ArgModRefCS1 = getArgModRefInfo(CS1, CS1ArgIdx);
+ModRefInfo ModRefCS2 = getModRefInfo(CS2, CS1ArgLoc);
+if ((isModSet(ArgModRefCS1) && isModOrRefSet(ModRefCS2)) ||
+(isRefSet(ArgModRefCS1) && isModSet(ModRefCS2)))
+R = intersectModRef(unionModRef(R, ArgModRefCS1), Result);
if (R == Result)
break;
@ -456,7 +462,7 @@ ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
-/// in a BasicBlock. A ordered basic block \p OBB can be used to speed up
+/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction-ordering queries inside the BasicBlock containing \p I.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
@ -538,7 +544,7 @@ bool AAResults::canInstructionRangeModRef(const Instruction &I1,
++E; // Convert from inclusive to exclusive range.
for (; I != E; ++I) // Check every instruction in range
-if (getModRefInfo(&*I, Loc) & Mode)
+if (intersectModRef(getModRefInfo(&*I, Loc), Mode))
return true;
return false;
}

View File

@ -211,8 +211,8 @@ bool AliasSet::aliasesPointer(const Value *Ptr, uint64_t Size,
if (!UnknownInsts.empty()) {
for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i)
if (auto *Inst = getUnknownInst(i))
-if (AA.getModRefInfo(Inst, MemoryLocation(Ptr, Size, AAInfo)) !=
-MRI_NoModRef)
+if (isModOrRefSet(
+AA.getModRefInfo(Inst, MemoryLocation(Ptr, Size, AAInfo))))
return true;
}
@ -231,15 +231,15 @@ bool AliasSet::aliasesUnknownInst(const Instruction *Inst,
for (unsigned i = 0, e = UnknownInsts.size(); i != e; ++i) {
if (auto *UnknownInst = getUnknownInst(i)) {
ImmutableCallSite C1(UnknownInst), C2(Inst);
-if (!C1 || !C2 || AA.getModRefInfo(C1, C2) != MRI_NoModRef ||
-AA.getModRefInfo(C2, C1) != MRI_NoModRef)
+if (!C1 || !C2 || isModOrRefSet(AA.getModRefInfo(C1, C2)) ||
+isModOrRefSet(AA.getModRefInfo(C2, C1)))
return true;
}
}
for (iterator I = begin(), E = end(); I != E; ++I)
-if (AA.getModRefInfo(Inst, MemoryLocation(I.getPointer(), I.getSize(),
-I.getAAInfo())) != MRI_NoModRef)
+if (isModOrRefSet(AA.getModRefInfo(
+Inst, MemoryLocation(I.getPointer(), I.getSize(), I.getAAInfo()))))
return true;
return false;
@ -572,12 +572,11 @@ AliasSet &AliasSetTracker::mergeAllAliasSets() {
AliasAnyAS->AliasAny = true;
for (auto Cur : ASVector) {
// If Cur was already forwarding, just forward to the new AS instead.
AliasSet *FwdTo = Cur->Forward;
if (FwdTo) {
Cur->Forward = AliasAnyAS;
AliasAnyAS->addRef();
FwdTo->dropRef(*this);
continue;
}

View File

@ -809,12 +809,12 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// Operand aliases 'Object', but call doesn't modify it. Strengthen
// initial assumption and keep looking in case if there are more aliases.
if (CS.onlyReadsMemory(OperandNo)) {
-Result = static_cast<ModRefInfo>(Result | MRI_Ref);
+Result = setRef(Result);
continue;
}
// Operand aliases 'Object' but call only writes into it.
if (CS.doesNotReadMemory(OperandNo)) {
-Result = static_cast<ModRefInfo>(Result | MRI_Mod);
+Result = setMod(Result);
continue;
}
// This operand aliases 'Object' and call reads and writes into it.
@ -832,7 +832,7 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// routines do not read values visible in the IR. TODO: Consider special
// casing realloc and strdup routines which access only their arguments as
// well. Or alternatively, replace all of this with inaccessiblememonly once
// that's implemented fully.
auto *Inst = CS.getInstruction();
if (isMallocOrCallocLikeFn(Inst, &TLI)) {
// Be conservative if the accessed pointer may alias the allocation -
@ -860,9 +860,9 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
// It's also possible for Loc to alias both src and dest, or neither.
ModRefInfo rv = MRI_NoModRef;
if (SrcAA != NoAlias)
-rv = static_cast<ModRefInfo>(rv | MRI_Ref);
+rv = setRef(rv);
if (DestAA != NoAlias)
-rv = static_cast<ModRefInfo>(rv | MRI_Mod);
+rv = setMod(rv);
return rv;
}
@ -933,10 +933,12 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
// possibilities for guard intrinsics.
if (isIntrinsicCall(CS1, Intrinsic::experimental_guard))
-return getModRefBehavior(CS2) & MRI_Mod ? MRI_Ref : MRI_NoModRef;
+return isModSet(ModRefInfo(getModRefBehavior(CS2))) ? MRI_Ref
+: MRI_NoModRef;
if (isIntrinsicCall(CS2, Intrinsic::experimental_guard))
-return getModRefBehavior(CS1) & MRI_Mod ? MRI_Mod : MRI_NoModRef;
+return isModSet(ModRefInfo(getModRefBehavior(CS1))) ? MRI_Mod
+: MRI_NoModRef;
// The AAResultBase base class has some smarts, lets use them.
return AAResultBase::getModRefInfo(CS1, CS2);

View File

@ -84,6 +84,7 @@ class GlobalsAAResult::FunctionInfo {
/// The bit that flags that this function may read any global. This is
/// chosen to mix together with ModRefInfo bits.
+/// FIXME: This assumes ModRefInfo lattice will remain 4 bits!
enum { MayReadAnyGlobal = 4 };
/// Checks to document the invariants of the bit packing here.
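
A sketch of the kind of packing check the comment above refers to (assuming
the two-bit ModRefInfo encoding from AliasAnalysis.h):

static_assert((MayReadAnyGlobal & MRI_ModRef) == 0,
              "MayReadAnyGlobal must not overlap the Mod/Ref bits");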
@ -230,9 +231,9 @@ FunctionModRefBehavior GlobalsAAResult::getModRefBehavior(const Function *F) {
FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;
if (FunctionInfo *FI = getFunctionInfo(F)) {
-if (FI->getModRefInfo() == MRI_NoModRef)
+if (!isModOrRefSet(FI->getModRefInfo()))
Min = FMRB_DoesNotAccessMemory;
-else if ((FI->getModRefInfo() & MRI_Mod) == 0)
+else if (!isModSet(FI->getModRefInfo()))
Min = FMRB_OnlyReadsMemory;
}
@ -246,9 +247,9 @@ GlobalsAAResult::getModRefBehavior(ImmutableCallSite CS) {
if (!CS.hasOperandBundles())
if (const Function *F = CS.getCalledFunction())
if (FunctionInfo *FI = getFunctionInfo(F)) {
-if (FI->getModRefInfo() == MRI_NoModRef)
+if (!isModOrRefSet(FI->getModRefInfo()))
Min = FMRB_DoesNotAccessMemory;
-else if ((FI->getModRefInfo() & MRI_Mod) == 0)
+else if (!isModSet(FI->getModRefInfo()))
Min = FMRB_OnlyReadsMemory;
}
@ -544,7 +545,7 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
// Scan the function bodies for explicit loads or stores.
for (auto *Node : SCC) {
-if (FI.getModRefInfo() == MRI_ModRef)
+if (isModAndRefSet(FI.getModRefInfo()))
break; // The mod/ref lattice saturates here.
// Don't prove any properties based on the implementation of an optnone
@ -554,7 +555,7 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
continue;
for (Instruction &I : instructions(Node->getFunction())) {
-if (FI.getModRefInfo() == MRI_ModRef)
+if (isModAndRefSet(FI.getModRefInfo()))
break; // The mod/ref lattice saturates here.
// We handle calls specially because the graph-relevant aspects are
@ -584,9 +585,9 @@ void GlobalsAAResult::AnalyzeCallGraph(CallGraph &CG, Module &M) {
}
}
-if ((FI.getModRefInfo() & MRI_Mod) == 0)
+if (!isModSet(FI.getModRefInfo()))
++NumReadMemFunctions;
-if (FI.getModRefInfo() == MRI_NoModRef)
+if (!isModOrRefSet(FI.getModRefInfo()))
++NumNoMemFunctions;
// Finally, now that we know the full effect on this SCC, clone the
@ -894,7 +895,7 @@ ModRefInfo GlobalsAAResult::getModRefInfoForArgument(ImmutableCallSite CS,
ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
const MemoryLocation &Loc) {
-unsigned Known = MRI_ModRef;
+ModRefInfo Known = MRI_ModRef;
// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
@ -904,12 +905,12 @@ ModRefInfo GlobalsAAResult::getModRefInfo(ImmutableCallSite CS,
if (const Function *F = CS.getCalledFunction())
if (NonAddressTakenGlobals.count(GV))
if (const FunctionInfo *FI = getFunctionInfo(F))
-Known = FI->getModRefInfoForGlobal(*GV) |
-getModRefInfoForArgument(CS, GV);
+Known = unionModRef(FI->getModRefInfoForGlobal(*GV),
+getModRefInfoForArgument(CS, GV));
-if (Known == MRI_NoModRef)
+if (!isModOrRefSet(Known))
return MRI_NoModRef; // No need to query other mod/ref analyses
-return ModRefInfo(Known & AAResultBase::getModRefInfo(CS, Loc));
+return intersectModRef(Known, AAResultBase::getModRefInfo(CS, Loc));
}
GlobalsAAResult::GlobalsAAResult(const DataLayout &DL,

View File

@ -414,7 +414,7 @@ Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
// If we have alias analysis and it says the store won't modify the loaded
// value, ignore the store.
-if (AA && (AA->getModRefInfo(SI, StrippedPtr, AccessSize) & MRI_Mod) == 0)
+if (AA && !isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
continue;
// Otherwise the store that may or may not alias the pointer, bail out.
@ -426,8 +426,7 @@ Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
if (Inst->mayWriteToMemory()) {
// If alias analysis claims that it really won't modify the load,
// ignore it.
-if (AA &&
-(AA->getModRefInfo(Inst, StrippedPtr, AccessSize) & MRI_Mod) == 0)
+if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
continue;
// May modify the pointer, bail out.

View File

@ -212,32 +212,30 @@ MemDepResult MemoryDependenceResults::getCallSiteDependencyFrom(
ModRefInfo MR = GetLocation(Inst, Loc, TLI);
if (Loc.Ptr) {
// A simple instruction.
-if (AA.getModRefInfo(CS, Loc) != MRI_NoModRef)
+if (isModOrRefSet(AA.getModRefInfo(CS, Loc)))
return MemDepResult::getClobber(Inst);
continue;
}
if (auto InstCS = CallSite(Inst)) {
// If these two calls do not interfere, look past it.
-switch (AA.getModRefInfo(CS, InstCS)) {
-case MRI_NoModRef:
+if (isNoModRef(AA.getModRefInfo(CS, InstCS))) {
// If the two calls are the same, return InstCS as a Def, so that
// CS can be found redundant and eliminated.
-if (isReadOnlyCall && !(MR & MRI_Mod) &&
+if (isReadOnlyCall && !isModSet(MR) &&
CS.getInstruction()->isIdenticalToWhenDefined(Inst))
return MemDepResult::getDef(Inst);
// Otherwise if the two calls don't interact (e.g. InstCS is readnone)
// keep scanning.
continue;
-default:
+} else
return MemDepResult::getClobber(Inst);
-}
}
// If we could not obtain a pointer for the instruction and the instruction
// touches memory then assume that this is a dependency.
-if (MR != MRI_NoModRef)
+if (isModOrRefSet(MR))
return MemDepResult::getClobber(Inst);
}
@ -642,7 +640,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// If alias analysis can tell that this store is guaranteed to not modify
// the query pointer, ignore it. Use getModRefInfo to handle cases where
// the query pointer points to constant memory etc.
-if (AA.getModRefInfo(SI, MemLoc) == MRI_NoModRef)
+if (!isModOrRefSet(AA.getModRefInfo(SI, MemLoc)))
continue;
// Ok, this store might clobber the query pointer. Check to see if it is
@ -688,7 +686,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
// If necessary, perform additional analysis.
-if (MR == MRI_ModRef)
+if (isModAndRefSet(MR))
MR = AA.callCapturesBefore(Inst, MemLoc, &DT, &OBB);
switch (MR) {
case MRI_NoModRef:

View File

@ -262,7 +262,7 @@ static bool instructionClobbersQuery(MemoryDef *MD,
if (UseCS) {
ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
-return I != MRI_NoModRef;
+return isModOrRefSet(I);
}
if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) {
@ -278,7 +278,7 @@ static bool instructionClobbersQuery(MemoryDef *MD,
}
}
-return AA.getModRefInfo(DefInst, UseLoc) & MRI_Mod;
+return isModSet(AA.getModRefInfo(DefInst, UseLoc));
}
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
@ -1526,8 +1526,8 @@ MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
// Separate memory aliasing and ordering into two different chains so that we
// can precisely represent both "what memory will this read/write/is clobbered
// by" and "what instructions can I move this past".
-bool Def = bool(ModRef & MRI_Mod) || isOrdered(I);
-bool Use = bool(ModRef & MRI_Ref);
+bool Def = isModSet(ModRef) || isOrdered(I);
+bool Use = isRefSet(ModRef);
// It's possible for an instruction to not modify memory at all. During
// construction, we ignore them.

View File

@ -1928,7 +1928,8 @@ mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
for (auto *B : L->blocks())
for (auto &I : *B)
-if (Ignored.count(&I) == 0 && (AA.getModRefInfo(&I, StoreLoc) & Access))
+if (Ignored.count(&I) == 0 &&
+intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access))
return true;
return false;

View File

@ -130,17 +130,18 @@ static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
SCCNodes.count(CS.getCalledFunction()))
continue;
FunctionModRefBehavior MRB = AAR.getModRefBehavior(CS);
+ModRefInfo MRI = createModRefInfo(MRB);
// If the call doesn't access memory, we're done.
-if (!(MRB & MRI_ModRef))
+if (isNoModRef(MRI))
continue;
if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
// The call could access any memory. If that includes writes, give up.
-if (MRB & MRI_Mod)
+if (isModSet(MRI))
return MAK_MayWrite;
// If it reads, note it.
-if (MRB & MRI_Ref)
+if (isRefSet(MRI))
ReadsMemory = true;
ReadsMemory = true;
continue;
}
@ -162,10 +163,10 @@ static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
continue;
-if (MRB & MRI_Mod)
+if (isModSet(MRI))
// Writes non-local memory. Give up.
return MAK_MayWrite;
-if (MRB & MRI_Ref)
+if (isRefSet(MRI))
// Ok, it reads non-local memory.
ReadsMemory = true;
}

View File

@ -248,7 +248,7 @@ static StoreInst *findSafeStoreForStoreStrongContraction(LoadInst *Load,
// Ok, now we know we have not seen a store yet. See if Inst can write to
// our load location, if it can not, just ignore the instruction.
-if (!(AA->getModRefInfo(Inst, Loc) & MRI_Mod))
+if (!isModSet(AA->getModRefInfo(Inst, Loc)))
continue;
Store = dyn_cast<StoreInst>(Inst);

View File

@ -594,11 +594,9 @@ static bool memoryIsNotModifiedBetween(Instruction *FirstI,
}
for (; BI != EI; ++BI) {
Instruction *I = &*BI;
-if (I->mayWriteToMemory() && I != SecondI) {
-auto Res = AA->getModRefInfo(I, MemLoc);
-if (Res & MRI_Mod)
+if (I->mayWriteToMemory() && I != SecondI)
+if (isModSet(AA->getModRefInfo(I, MemLoc)))
return false;
-}
}
if (B != FirstBB) {
assert(B != &FirstBB->getParent()->getEntryBlock() &&
@ -822,9 +820,7 @@ static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
// the call is live.
DeadStackObjects.remove_if([&](Value *I) {
// See if the call site touches the value.
-ModRefInfo A = AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI));
-return A == MRI_ModRef || A == MRI_Ref;
+return isRefSet(AA->getModRefInfo(CS, I, getPointerSize(I, DL, *TLI)));
});
// If all of the allocas were clobbered by the call then we're not going
@ -1255,7 +1251,7 @@ static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
if (DepWrite == &BB.front()) break;
// Can't look past this instruction if it might read 'Loc'.
-if (AA->getModRefInfo(DepWrite, Loc) & MRI_Ref)
+if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
break;
InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,

View File

@ -788,7 +788,7 @@ mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
++BI)
for (Instruction &I : **BI)
if (IgnoredStores.count(&I) == 0 &&
-(AA.getModRefInfo(&I, StoreLoc) & Access))
+intersectModRef(AA.getModRefInfo(&I, StoreLoc), Access))
return true;
return false;

View File

@ -518,7 +518,7 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
const LoadInst *LI) {
// If the store alias this position, early bail out.
MemoryLocation StoreLoc = MemoryLocation::get(SI);
-if (AA.getModRefInfo(P, StoreLoc) != MRI_NoModRef)
+if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
return false;
// Keep track of the arguments of all instruction we plan to lift
@ -542,20 +542,20 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
auto *C = &*I;
-bool MayAlias = AA.getModRefInfo(C, None) != MRI_NoModRef;
+bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));
bool NeedLift = false;
if (Args.erase(C))
NeedLift = true;
else if (MayAlias) {
NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
-return AA.getModRefInfo(C, ML);
+return isModOrRefSet(AA.getModRefInfo(C, ML));
});
if (!NeedLift)
NeedLift =
llvm::any_of(CallSites, [C, &AA](const ImmutableCallSite &CS) {
-return AA.getModRefInfo(C, CS);
+return isModOrRefSet(AA.getModRefInfo(C, CS));
});
}
@ -565,18 +565,18 @@ static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
if (MayAlias) {
// Since LI is implicitly moved downwards past the lifted instructions,
// none of them may modify its source.
-if (AA.getModRefInfo(C, LoadLoc) & MRI_Mod)
+if (isModSet(AA.getModRefInfo(C, LoadLoc)))
return false;
else if (auto CS = ImmutableCallSite(C)) {
// If we can't lift this before P, it's game over.
-if (AA.getModRefInfo(P, CS) != MRI_NoModRef)
+if (isModOrRefSet(AA.getModRefInfo(P, CS)))
return false;
CallSites.push_back(CS);
} else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
// If we can't lift this before P, it's game over.
auto ML = MemoryLocation::get(C);
-if (AA.getModRefInfo(P, ML) != MRI_NoModRef)
+if (isModOrRefSet(AA.getModRefInfo(P, ML)))
return false;
MemLocs.push_back(ML);
@ -631,7 +631,7 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
// of at the store position.
Instruction *P = SI;
for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
-if (AA.getModRefInfo(&I, LoadLoc) & MRI_Mod) {
+if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
P = &I;
break;
}
@ -702,7 +702,7 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
MemoryLocation StoreLoc = MemoryLocation::get(SI);
for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
I != E; --I) {
-if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
+if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
C = nullptr;
break;
}
@ -934,9 +934,9 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
AliasAnalysis &AA = LookupAliasAnalysis();
ModRefInfo MR = AA.getModRefInfo(C, cpyDest, srcSize);
// If necessary, perform additional analysis.
-if (MR != MRI_NoModRef)
+if (isModOrRefSet(MR))
MR = AA.callCapturesBefore(C, cpyDest, srcSize, &DT);
-if (MR != MRI_NoModRef)
+if (isModOrRefSet(MR))
return false;
// We can't create address space casts here because we don't know if they're

View File

@ -68,7 +68,7 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis &AA,
if (LoadInst *L = dyn_cast<LoadInst>(Inst)) {
MemoryLocation Loc = MemoryLocation::get(L);
for (Instruction *S : Stores)
-if (AA.getModRefInfo(S, Loc) & MRI_Mod)
+if (isModSet(AA.getModRefInfo(S, Loc)))
return false;
}
@ -83,7 +83,7 @@ static bool isSafeToMove(Instruction *Inst, AliasAnalysis &AA,
return false;
for (Instruction *S : Stores)
-if (AA.getModRefInfo(S, CS) & MRI_Mod)
+if (isModSet(AA.getModRefInfo(S, CS)))
return false;
}

View File

@ -332,7 +332,7 @@ static bool canMoveAboveCall(Instruction *I, CallInst *CI, AliasAnalysis *AA) {
// Writes to memory only matter if they may alias the pointer
// being loaded from.
const DataLayout &DL = L->getModule()->getDataLayout();
-if ((AA->getModRefInfo(CI, MemoryLocation::get(L)) & MRI_Mod) ||
+if (isModSet(AA->getModRefInfo(CI, MemoryLocation::get(L))) ||
!isSafeToLoadUnconditionally(L->getPointerOperand(),
L->getAlignment(), DL, L))
return false;