[AA] Do not track Must in ModRefInfo

getModRefInfo() queries currently track whether the result is a
MustAlias on a best-effort basis. The only user of this functionality
is the optimized memory access type in MemorySSA -- which in turn
has no users. Given that this functionality has not found a user
since it was introduced five years ago (in D38862), I think we
should drop it again.
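
For reference, a condensed sketch of the encoding being dropped (the values
are taken from the first hunk below; this is not a verbatim excerpt of the
header, just the bit layout in isolation):

#include <cstdint>

// Old three-bit encoding: a *cleared* NoModRef bit meant "a MustAlias was found".
enum class ModRefInfo : uint8_t {
  Must       = 0,                  // no mod/ref, but MustAlias known
  MustRef    = 1,                  // ref + MustAlias
  MustMod    = 2,                  // mod + MustAlias
  MustModRef = MustRef | MustMod,  // mod + ref + MustAlias
  NoModRef   = 4,                  // the "no Must info" bit, nothing else
  Ref        = NoModRef | MustRef, // == 5
  Mod        = NoModRef | MustMod, // == 6
  ModRef     = Ref | Mod,          // == 7
};

inline bool isMustSet(ModRefInfo MRI) {
  return !(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::NoModRef));
}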

The context is that I'm working on splitting FunctionModRefBehavior
so that it tracks mod/ref separately for different location kinds
(like argmem or inaccessiblemem). The fact that ModRefInfo also
carries an unrelated Must flag makes this quite awkward, especially
as it means that NoModRef is not a zero value. If we want to retain
the functionality, I would probably split getModRefInfo() results
into a part that contains just the ModRef information and a
separate part containing a (best-effort) AliasResult.
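
A minimal sketch of what that split might look like (purely hypothetical;
the struct and field names are not part of this patch):

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/AliasAnalysis.h" // assumed home of ModRefInfo/AliasResult

// Hypothetical shape only: the mod/ref bits and the best-effort alias result
// would travel separately instead of being packed into one enum.
struct ModRefResult {
  llvm::ModRefInfo MR = llvm::ModRefInfo::NoModRef; // plain NoModRef/Ref/Mod/ModRef
  llvm::Optional<llvm::AliasResult> AR;             // best-effort, often absent
};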

Differential Revision: https://reviews.llvm.org/D130713
Author: Nikita Popov, 2022-07-28 17:23:36 +02:00
Commit: f96ea53e89 (parent 967f95fb07)
21 changed files with 89 additions and 431 deletions


@ -142,82 +142,42 @@ raw_ostream &operator<<(raw_ostream &OS, AliasResult AR);
/// Flags indicating whether a memory access modifies or references memory.
///
/// This is no access at all, a modification, a reference, or both
/// a modification and a reference. These are specifically structured such that
/// they form a three bit matrix and bit-tests for 'mod' or 'ref' or 'must'
/// work with any of the possible values.
/// a modification and a reference.
enum class ModRefInfo : uint8_t {
/// Must is provided for completeness, but no routines will return only
/// Must today. See definition of Must below.
Must = 0,
/// The access may reference the value stored in memory,
/// a mustAlias relation was found, and no mayAlias or partialAlias found.
MustRef = 1,
/// The access may modify the value stored in memory,
/// a mustAlias relation was found, and no mayAlias or partialAlias found.
MustMod = 2,
/// The access may reference, modify or both the value stored in memory,
/// a mustAlias relation was found, and no mayAlias or partialAlias found.
MustModRef = MustRef | MustMod,
/// The access neither references nor modifies the value stored in memory.
NoModRef = 4,
NoModRef = 0,
/// The access may reference the value stored in memory.
Ref = NoModRef | MustRef,
Ref = 1,
/// The access may modify the value stored in memory.
Mod = NoModRef | MustMod,
Mod = 2,
/// The access may reference and may modify the value stored in memory.
ModRef = Ref | Mod,
/// About Must:
/// Must is set in a best effort manner.
/// We usually do not try our best to infer Must, instead it is merely
/// another piece of "free" information that is presented when available.
/// Must set means there was certainly a MustAlias found. For calls,
/// where multiple arguments are checked (argmemonly), this translates to
/// only MustAlias or NoAlias was found.
/// Must is not set for RAR accesses, even if the two locations must
/// alias. The reason is that two read accesses translate to an early return
/// of NoModRef. An additional alias check to set Must may be
/// expensive. Other cases may also not set Must(e.g. callCapturesBefore).
/// We refer to Must being *set* when the most significant bit is *cleared*.
/// Conversely we *clear* Must information by *setting* the Must bit to 1.
};
LLVM_NODISCARD inline bool isNoModRef(const ModRefInfo MRI) {
return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) ==
static_cast<int>(ModRefInfo::Must);
return MRI == ModRefInfo::NoModRef;
}
LLVM_NODISCARD inline bool isModOrRefSet(const ModRefInfo MRI) {
return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef);
return MRI != ModRefInfo::NoModRef;
}
LLVM_NODISCARD inline bool isModAndRefSet(const ModRefInfo MRI) {
return (static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustModRef)) ==
static_cast<int>(ModRefInfo::MustModRef);
return MRI == ModRefInfo::ModRef;
}
LLVM_NODISCARD inline bool isModSet(const ModRefInfo MRI) {
return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustMod);
return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod);
}
LLVM_NODISCARD inline bool isRefSet(const ModRefInfo MRI) {
return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::MustRef);
}
LLVM_NODISCARD inline bool isMustSet(const ModRefInfo MRI) {
return !(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::NoModRef));
return static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref);
}
LLVM_NODISCARD inline ModRefInfo setMod(const ModRefInfo MRI) {
return ModRefInfo(static_cast<int>(MRI) |
static_cast<int>(ModRefInfo::MustMod));
return ModRefInfo(static_cast<int>(MRI) | static_cast<int>(ModRefInfo::Mod));
}
LLVM_NODISCARD inline ModRefInfo setRef(const ModRefInfo MRI) {
return ModRefInfo(static_cast<int>(MRI) |
static_cast<int>(ModRefInfo::MustRef));
}
LLVM_NODISCARD inline ModRefInfo setMust(const ModRefInfo MRI) {
return ModRefInfo(static_cast<int>(MRI) &
static_cast<int>(ModRefInfo::MustModRef));
return ModRefInfo(static_cast<int>(MRI) | static_cast<int>(ModRefInfo::Ref));
}
LLVM_NODISCARD inline ModRefInfo setModAndRef(const ModRefInfo MRI) {
return ModRefInfo(static_cast<int>(MRI) |
static_cast<int>(ModRefInfo::MustModRef));
return ModRefInfo::ModRef;
}
LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Ref));
@ -225,10 +185,6 @@ LLVM_NODISCARD inline ModRefInfo clearMod(const ModRefInfo MRI) {
LLVM_NODISCARD inline ModRefInfo clearRef(const ModRefInfo MRI) {
return ModRefInfo(static_cast<int>(MRI) & static_cast<int>(ModRefInfo::Mod));
}
LLVM_NODISCARD inline ModRefInfo clearMust(const ModRefInfo MRI) {
return ModRefInfo(static_cast<int>(MRI) |
static_cast<int>(ModRefInfo::NoModRef));
}
LLVM_NODISCARD inline ModRefInfo unionModRef(const ModRefInfo MRI1,
const ModRefInfo MRI2) {
return ModRefInfo(static_cast<int>(MRI1) | static_cast<int>(MRI2));
@ -609,7 +565,7 @@ public:
/// result's bits are set to indicate the allowed aliasing ModRef kinds. Note
/// that these bits do not necessarily account for the overall behavior of
/// the function, but rather only provide additional per-argument
/// information. This never sets ModRefInfo::Must.
/// information.
ModRefInfo getArgModRefInfo(const CallBase *Call, unsigned ArgIdx);
/// Return the behavior of the given call site.
@ -847,8 +803,6 @@ public:
/// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock.
/// Early exits in callCapturesBefore may lead to ModRefInfo::Must not being
/// set.
ModRefInfo callCapturesBefore(const Instruction *I,
const MemoryLocation &MemLoc,
DominatorTree *DT) {
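
With the Must variants gone, these helpers reduce to plain two-bit logic.
A small self-contained sketch of how they compose under the new encoding
(the header path is assumed; ModRefInfo and the helpers above are declared
in llvm/Analysis/AliasAnalysis.h at this revision):

#include "llvm/Analysis/AliasAnalysis.h" // assumed location of ModRefInfo
#include <cassert>

void exampleModRefUsage() {
  using llvm::ModRefInfo;
  // Ref (1) | Mod (2) == ModRef (3); NoModRef is the zero value again.
  ModRefInfo MRI = llvm::unionModRef(ModRefInfo::Ref, ModRefInfo::Mod);
  assert(llvm::isModAndRefSet(MRI));
  assert(llvm::isModSet(MRI) && llvm::isRefSet(MRI));
  MRI = llvm::clearMod(MRI);                         // drops the Mod bit -> Ref
  assert(llvm::isRefSet(MRI) && !llvm::isModSet(MRI));
  assert(!llvm::isModOrRefSet(llvm::clearRef(MRI))); // back to NoModRef (0)
}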


@ -36,8 +36,6 @@ class AAEvaluator : public PassInfoMixin<AAEvaluator> {
int64_t NoAliasCount = 0, MayAliasCount = 0, PartialAliasCount = 0;
int64_t MustAliasCount = 0;
int64_t NoModRefCount = 0, ModCount = 0, RefCount = 0, ModRefCount = 0;
int64_t MustCount = 0, MustRefCount = 0, MustModCount = 0;
int64_t MustModRefCount = 0;
public:
AAEvaluator() = default;
@ -47,9 +45,7 @@ public:
PartialAliasCount(Arg.PartialAliasCount),
MustAliasCount(Arg.MustAliasCount), NoModRefCount(Arg.NoModRefCount),
ModCount(Arg.ModCount), RefCount(Arg.RefCount),
ModRefCount(Arg.ModRefCount), MustCount(Arg.MustCount),
MustRefCount(Arg.MustRefCount), MustModCount(Arg.MustModCount),
MustModRefCount(Arg.MustModRefCount) {
ModRefCount(Arg.ModRefCount) {
Arg.FunctionCount = 0;
}
~AAEvaluator();


@ -272,12 +272,6 @@ public:
/// Sets the optimized use for a MemoryDef.
inline void setOptimized(MemoryAccess *);
// Retrieve AliasResult type of the optimized access. Ideally this would be
// returned by the caching walker and may go away in the future.
Optional<AliasResult> getOptimizedAccessType() const {
return isOptimized() ? OptimizedAccessAlias : None;
}
/// Reset the ID of what this MemoryUse was optimized to, causing it to
/// be rewalked by the walker if necessary.
/// This really should only be called by tests.
@ -291,31 +285,23 @@ protected:
DeleteValueTy DeleteValue, Instruction *MI, BasicBlock *BB,
unsigned NumOperands)
: MemoryAccess(C, Vty, DeleteValue, BB, NumOperands),
MemoryInstruction(MI), OptimizedAccessAlias(AliasResult::MayAlias) {
MemoryInstruction(MI) {
setDefiningAccess(DMA);
}
// Use deleteValue() to delete a generic MemoryUseOrDef.
~MemoryUseOrDef() = default;
void setOptimizedAccessType(Optional<AliasResult> AR) {
OptimizedAccessAlias = AR;
}
void setDefiningAccess(
MemoryAccess *DMA, bool Optimized = false,
Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias)) {
void setDefiningAccess(MemoryAccess *DMA, bool Optimized = false) {
if (!Optimized) {
setOperand(0, DMA);
return;
}
setOptimized(DMA);
setOptimizedAccessType(AR);
}
private:
Instruction *MemoryInstruction;
Optional<AliasResult> OptimizedAccessAlias;
};
/// Represents read-only accesses to memory


@ -245,7 +245,6 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
Result = clearRef(Result);
if (onlyAccessesArgPointees(MRB) || onlyAccessesInaccessibleOrArgMem(MRB)) {
bool IsMustAlias = true;
ModRefInfo AllArgsMask = ModRefInfo::NoModRef;
if (doesAccessArgPointees(MRB)) {
for (const auto &I : llvm::enumerate(Call->args())) {
@ -260,8 +259,6 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
ModRefInfo ArgMask = getArgModRefInfo(Call, ArgIdx);
AllArgsMask = unionModRef(AllArgsMask, ArgMask);
}
// Conservatively clear IsMustAlias unless only MustAlias is found.
IsMustAlias &= (ArgAlias == AliasResult::MustAlias);
}
}
// Return NoModRef if no alias found with any argument.
@ -269,8 +266,6 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call,
return ModRefInfo::NoModRef;
// Logical & between other AA analyses and argument analysis.
Result = intersectModRef(Result, AllArgsMask);
// If only MustAlias found above, set Must bit.
Result = IsMustAlias ? setMust(Result) : clearMust(Result);
}
// If Loc is a constant memory location, the call definitely could not
@ -329,7 +324,6 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
if (!doesAccessArgPointees(Call2B))
return ModRefInfo::NoModRef;
ModRefInfo R = ModRefInfo::NoModRef;
bool IsMustAlias = true;
for (auto I = Call2->arg_begin(), E = Call2->arg_end(); I != E; ++I) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
@ -355,23 +349,12 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
ModRefInfo ModRefC1 = getModRefInfo(Call1, Call2ArgLoc, AAQI);
ArgMask = intersectModRef(ArgMask, ModRefC1);
// Conservatively clear IsMustAlias unless only MustAlias is found.
IsMustAlias &= isMustSet(ModRefC1);
R = intersectModRef(unionModRef(R, ArgMask), Result);
if (R == Result) {
// On early exit, not all args were checked, cannot set Must.
if (I + 1 != E)
IsMustAlias = false;
if (R == Result)
break;
}
}
if (isNoModRef(R))
return ModRefInfo::NoModRef;
// If MustAlias found above, set Must bit.
return IsMustAlias ? setMust(R) : clearMust(R);
return R;
}
// If Call1 only accesses memory through arguments, check if Call2 references
@ -380,7 +363,6 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
if (!doesAccessArgPointees(Call1B))
return ModRefInfo::NoModRef;
ModRefInfo R = ModRefInfo::NoModRef;
bool IsMustAlias = true;
for (auto I = Call1->arg_begin(), E = Call1->arg_end(); I != E; ++I) {
const Value *Arg = *I;
if (!Arg->getType()->isPointerTy())
@ -398,22 +380,11 @@ ModRefInfo AAResults::getModRefInfo(const CallBase *Call1,
(isRefSet(ArgModRefC1) && isModSet(ModRefC2)))
R = intersectModRef(unionModRef(R, ArgModRefC1), Result);
// Conservatively clear IsMustAlias unless only MustAlias is found.
IsMustAlias &= isMustSet(ModRefC2);
if (R == Result) {
// On early exit, not all args were checked, cannot set Must.
if (I + 1 != E)
IsMustAlias = false;
if (R == Result)
break;
}
}
if (isNoModRef(R))
return ModRefInfo::NoModRef;
// If MustAlias found above, set Must bit.
return IsMustAlias ? setMust(R) : clearMust(R);
return R;
}
return Result;
@ -489,8 +460,6 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
AliasResult AR = alias(MemoryLocation::get(L), Loc, AAQI);
if (AR == AliasResult::NoAlias)
return ModRefInfo::NoModRef;
if (AR == AliasResult::MustAlias)
return ModRefInfo::MustRef;
}
// Otherwise, a load just reads.
return ModRefInfo::Ref;
@ -519,10 +488,6 @@ ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
// been modified by this store.
if (pointsToConstantMemory(Loc, AAQI))
return ModRefInfo::NoModRef;
// If the store address aliases the pointer as must alias, set Must.
if (AR == AliasResult::MustAlias)
return ModRefInfo::MustMod;
}
// Otherwise, a store just writes.
@ -564,10 +529,6 @@ ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
// been modified by this va_arg.
if (pointsToConstantMemory(Loc, AAQI))
return ModRefInfo::NoModRef;
// If the va_arg aliases the pointer as must alias, set Must.
if (AR == AliasResult::MustAlias)
return ModRefInfo::MustModRef;
}
// Otherwise, a va_arg reads and writes.
@ -633,10 +594,6 @@ ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
// it.
if (AR == AliasResult::NoAlias)
return ModRefInfo::NoModRef;
// If the cmpxchg address aliases the pointer as must alias, set Must.
if (AR == AliasResult::MustAlias)
return ModRefInfo::MustModRef;
}
return ModRefInfo::ModRef;
@ -661,10 +618,6 @@ ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
// it.
if (AR == AliasResult::NoAlias)
return ModRefInfo::NoModRef;
// If the atomicrmw address aliases the pointer as must alias, set Must.
if (AR == AliasResult::MustAlias)
return ModRefInfo::MustModRef;
}
return ModRefInfo::ModRef;
@ -738,7 +691,6 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
unsigned ArgNo = 0;
ModRefInfo R = ModRefInfo::NoModRef;
bool IsMustAlias = true;
// Set flag only if no May found and all operands processed.
for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
CI != CE; ++CI, ++ArgNo) {
@ -757,8 +709,6 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
// is impossible to alias the pointer we're checking. If not, we have to
// assume that the call could touch the pointer, even though it doesn't
// escape.
if (AR != AliasResult::MustAlias)
IsMustAlias = false;
if (AR == AliasResult::NoAlias)
continue;
if (Call->doesNotAccessMemory(ArgNo))
@ -767,10 +717,9 @@ ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
R = ModRefInfo::Ref;
continue;
}
// Not returning MustModRef since we have not seen all the arguments.
return ModRefInfo::ModRef;
}
return IsMustAlias ? setMust(R) : clearMust(R);
return R;
}
/// canBasicBlockModify - Return true if it is possible for execution of the


@ -31,10 +31,6 @@ static cl::opt<bool> PrintNoModRef("print-no-modref", cl::ReallyHidden);
static cl::opt<bool> PrintRef("print-ref", cl::ReallyHidden);
static cl::opt<bool> PrintMod("print-mod", cl::ReallyHidden);
static cl::opt<bool> PrintModRef("print-modref", cl::ReallyHidden);
static cl::opt<bool> PrintMust("print-must", cl::ReallyHidden);
static cl::opt<bool> PrintMustRef("print-mustref", cl::ReallyHidden);
static cl::opt<bool> PrintMustMod("print-mustmod", cl::ReallyHidden);
static cl::opt<bool> PrintMustModRef("print-mustmodref", cl::ReallyHidden);
static cl::opt<bool> EvalAAMD("evaluate-aa-metadata", cl::ReallyHidden);
@ -238,25 +234,6 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
F.getParent());
++ModRefCount;
break;
case ModRefInfo::Must:
PrintModRefResults("Must", PrintMust, Call, Pointer, F.getParent());
++MustCount;
break;
case ModRefInfo::MustMod:
PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, Call, Pointer,
F.getParent());
++MustModCount;
break;
case ModRefInfo::MustRef:
PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, Call, Pointer,
F.getParent());
++MustRefCount;
break;
case ModRefInfo::MustModRef:
PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, Call,
Pointer, F.getParent());
++MustModRefCount;
break;
}
}
}
@ -285,25 +262,6 @@ void AAEvaluator::runInternal(Function &F, AAResults &AA) {
F.getParent());
++ModRefCount;
break;
case ModRefInfo::Must:
PrintModRefResults("Must", PrintMust, CallA, CallB, F.getParent());
++MustCount;
break;
case ModRefInfo::MustMod:
PrintModRefResults("Just Mod (MustAlias)", PrintMustMod, CallA, CallB,
F.getParent());
++MustModCount;
break;
case ModRefInfo::MustRef:
PrintModRefResults("Just Ref (MustAlias)", PrintMustRef, CallA, CallB,
F.getParent());
++MustRefCount;
break;
case ModRefInfo::MustModRef:
PrintModRefResults("Both ModRef (MustAlias)", PrintMustModRef, CallA,
CallB, F.getParent());
++MustModRefCount;
break;
}
}
}
@ -341,8 +299,7 @@ AAEvaluator::~AAEvaluator() {
}
// Display the summary for mod/ref analysis
int64_t ModRefSum = NoModRefCount + RefCount + ModCount + ModRefCount +
MustCount + MustRefCount + MustModCount + MustModRefCount;
int64_t ModRefSum = NoModRefCount + RefCount + ModCount + ModRefCount;
if (ModRefSum == 0) {
errs() << " Alias Analysis Mod/Ref Evaluator Summary: no "
"mod/ref!\n";
@ -356,22 +313,10 @@ AAEvaluator::~AAEvaluator() {
PrintPercent(RefCount, ModRefSum);
errs() << " " << ModRefCount << " mod & ref responses ";
PrintPercent(ModRefCount, ModRefSum);
errs() << " " << MustCount << " must responses ";
PrintPercent(MustCount, ModRefSum);
errs() << " " << MustModCount << " must mod responses ";
PrintPercent(MustModCount, ModRefSum);
errs() << " " << MustRefCount << " must ref responses ";
PrintPercent(MustRefCount, ModRefSum);
errs() << " " << MustModRefCount << " must mod & ref responses ";
PrintPercent(MustModRefCount, ModRefSum);
errs() << " Alias Analysis Evaluator Mod/Ref Summary: "
<< NoModRefCount * 100 / ModRefSum << "%/"
<< ModCount * 100 / ModRefSum << "%/" << RefCount * 100 / ModRefSum
<< "%/" << ModRefCount * 100 / ModRefSum << "%/"
<< MustCount * 100 / ModRefSum << "%/"
<< MustRefCount * 100 / ModRefSum << "%/"
<< MustModCount * 100 / ModRefSum << "%/"
<< MustModRefCount * 100 / ModRefSum << "%\n";
<< "%/" << ModRefCount * 100 / ModRefSum << "%\n";
}
}


@ -912,7 +912,6 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
// Optimistically assume that call doesn't touch Object and check this
// assumption in the following loop.
ModRefInfo Result = ModRefInfo::NoModRef;
bool IsMustAlias = true;
unsigned OperandNo = 0;
for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
@ -935,8 +934,6 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
AliasResult AR = getBestAAResults().alias(
MemoryLocation::getBeforeOrAfter(*CI),
MemoryLocation::getBeforeOrAfter(Object), AAQI);
if (AR != AliasResult::MustAlias)
IsMustAlias = false;
// Operand doesn't alias 'Object', continue looking for other aliases
if (AR == AliasResult::NoAlias)
continue;
@ -958,17 +955,9 @@ ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
break;
}
// No operand aliases, reset Must bit. Add below if at least one aliases
// and all aliases found are MustAlias.
if (isNoModRef(Result))
IsMustAlias = false;
// Early return if we improved mod ref information
if (!isModAndRefSet(Result)) {
if (isNoModRef(Result))
return ModRefInfo::NoModRef;
return IsMustAlias ? setMust(Result) : clearMust(Result);
}
if (!isModAndRefSet(Result))
return Result;
}
// If the call is malloc/calloc like, we can assume that it doesn't


@ -87,17 +87,14 @@ class GlobalsAAResult::FunctionInfo {
/// The bit that flags that this function may read any global. This is
/// chosen to mix together with ModRefInfo bits.
/// FIXME: This assumes ModRefInfo lattice will remain 4 bits!
/// It overlaps with ModRefInfo::Must bit!
/// FunctionInfo.getModRefInfo() masks out everything except ModRef so
/// this remains correct, but the Must info is lost.
/// this remains correct.
enum { MayReadAnyGlobal = 4 };
/// Checks to document the invariants of the bit packing here.
static_assert((MayReadAnyGlobal & static_cast<int>(ModRefInfo::MustModRef)) ==
0,
static_assert((MayReadAnyGlobal & static_cast<int>(ModRefInfo::ModRef)) == 0,
"ModRef and the MayReadAnyGlobal flag bits overlap.");
static_assert(((MayReadAnyGlobal |
static_cast<int>(ModRefInfo::MustModRef)) >>
static_assert(((MayReadAnyGlobal | static_cast<int>(ModRefInfo::ModRef)) >>
AlignedMapPointerTraits::NumLowBitsAvailable) == 0,
"Insufficient low bits to store our flag and ModRef info.");
@ -133,11 +130,9 @@ public:
}
/// This method clears MayReadAnyGlobal bit added by GlobalsAAResult to return
/// the corresponding ModRefInfo. It must align in functionality with
/// clearMust().
/// the corresponding ModRefInfo.
ModRefInfo globalClearMayReadAnyGlobal(int I) const {
return ModRefInfo((I & static_cast<int>(ModRefInfo::ModRef)) |
static_cast<int>(ModRefInfo::NoModRef));
return ModRefInfo(I & static_cast<int>(ModRefInfo::ModRef));
}
/// Returns the \c ModRefInfo info for this function.
@ -147,7 +142,7 @@ public:
/// Adds new \c ModRefInfo for this function to its state.
void addModRefInfo(ModRefInfo NewMRI) {
Info.setInt(Info.getInt() | static_cast<int>(setMust(NewMRI)));
Info.setInt(Info.getInt() | static_cast<int>(NewMRI));
}
/// Returns whether this function may read any global variable, and we don't
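
For illustration, a standalone sketch of that bit packing under the new
encoding (the function and variable names here are made up; only
MayReadAnyGlobal and the ModRefInfo values come from the snippet above,
and the header path is assumed):

#include "llvm/Analysis/AliasAnalysis.h" // assumed location of ModRefInfo

void packingSketch() {
  enum { MayReadAnyGlobal = 4 }; // bit 2, disjoint from ModRefInfo::ModRef (3)
  int Packed = static_cast<int>(llvm::ModRefInfo::Ref) | MayReadAnyGlobal; // 0b101
  bool ReadsAnyGlobal = (Packed & MayReadAnyGlobal) != 0;                  // true
  llvm::ModRefInfo MR =
      llvm::ModRefInfo(Packed & static_cast<int>(llvm::ModRefInfo::ModRef)); // Ref
  (void)ReadsAnyGlobal;
  (void)MR;
}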


@ -610,7 +610,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// If necessary, perform additional analysis.
if (isModAndRefSet(MR))
MR = BatchAA.callCapturesBefore(Inst, MemLoc, &DT);
switch (clearMust(MR)) {
switch (MR) {
case ModRefInfo::NoModRef:
// If the call has no effect on the queried pointer, just ignore it.
continue;


@ -283,24 +283,12 @@ static bool areLoadsReorderable(const LoadInst *Use,
return !(SeqCstUse || MayClobberIsAcquire);
}
namespace {
struct ClobberAlias {
bool IsClobber;
Optional<AliasResult> AR;
};
} // end anonymous namespace
// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
static bool
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
const Instruction *UseInst, AliasAnalysisType &AA) {
Instruction *DefInst = MD->getMemoryInst();
assert(DefInst && "Defining instruction not actually an instruction");
Optional<AliasResult> AR;
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
// These intrinsics will show up as affecting memory, but they are just
@ -316,7 +304,7 @@ instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
case Intrinsic::assume:
case Intrinsic::experimental_noalias_scope_decl:
case Intrinsic::pseudoprobe:
return {false, AliasResult(AliasResult::NoAlias)};
return false;
case Intrinsic::dbg_addr:
case Intrinsic::dbg_declare:
case Intrinsic::dbg_label:
@ -329,25 +317,21 @@ instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
ModRefInfo I = AA.getModRefInfo(DefInst, CB);
AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
return {isModOrRefSet(I), AR};
return isModOrRefSet(I);
}
if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
return {!areLoadsReorderable(UseLoad, DefLoad),
AliasResult(AliasResult::MayAlias)};
return !areLoadsReorderable(UseLoad, DefLoad);
ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
return {isModSet(I), AR};
return isModSet(I);
}
template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
const MemoryUseOrDef *MU,
const MemoryLocOrCall &UseMLOC,
AliasAnalysisType &AA) {
static bool instructionClobbersQuery(MemoryDef *MD, const MemoryUseOrDef *MU,
const MemoryLocOrCall &UseMLOC,
AliasAnalysisType &AA) {
// FIXME: This is a temporary hack to allow a single instructionClobbersQuery
// to exist while MemoryLocOrCall is pushed through places.
if (UseMLOC.IsCall)
@ -360,7 +344,7 @@ static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
AliasAnalysis &AA) {
return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA);
}
namespace {
@ -451,12 +435,8 @@ checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
// since MD may only act as a clobber for 1 of N MemoryLocations.
FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
if (!FoundClobber) {
ClobberAlias CA =
instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
if (CA.IsClobber) {
if (instructionClobbersQuery(MD, MAP.second, Query.Inst, AA))
FoundClobber = true;
// Not used: CA.AR;
}
}
}
break;
@ -575,7 +555,6 @@ template <class AliasAnalysisType> class ClobberWalker {
/// both. Include alias info when clobber found.
MemoryAccess *Result;
bool IsKnownClobber;
Optional<AliasResult> AR;
};
/// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
@ -601,19 +580,17 @@ template <class AliasAnalysisType> class ClobberWalker {
for (MemoryAccess *Current : def_chain(Desc.Last)) {
Desc.Last = Current;
if (Current == StopAt || Current == SkipStopAt)
return {Current, false, AliasResult(AliasResult::MayAlias)};
return {Current, false};
if (auto *MD = dyn_cast<MemoryDef>(Current)) {
if (MSSA.isLiveOnEntryDef(MD))
return {MD, true, AliasResult(AliasResult::MustAlias)};
return {MD, true};
if (!--*UpwardWalkLimit)
return {Current, true, AliasResult(AliasResult::MayAlias)};
return {Current, true};
ClobberAlias CA =
instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
if (CA.IsClobber)
return {MD, true, CA.AR};
if (instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA))
return {MD, true};
}
}
@ -622,7 +599,7 @@ template <class AliasAnalysisType> class ClobberWalker {
assert(isa<MemoryPhi>(Desc.Last) &&
"Ended at a non-clobber that's not a phi?");
return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
return {Desc.Last, false};
}
void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
@ -992,7 +969,6 @@ public:
MemoryAccess *Result;
if (WalkResult.IsKnownClobber) {
Result = WalkResult.Result;
Q.AR = WalkResult.AR;
} else {
OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
Current, Q.StartingLoc);
@ -1407,7 +1383,7 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
continue;
if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true);
continue;
}
@ -1449,7 +1425,6 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
if (!LocInfo.LastKillValid) {
LocInfo.LastKill = VersionStack.size() - 1;
LocInfo.LastKillValid = true;
LocInfo.AR = AliasResult::MayAlias;
}
// At this point, we should have corrected last kill and LowerBound to be
@ -1492,10 +1467,8 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
}
MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
if (CA.IsClobber) {
if (instructionClobbersQuery(MD, MU, UseMLOC, *AA)) {
FoundClobberResult = true;
LocInfo.AR = CA.AR;
break;
}
--UpperBound;
@ -1506,15 +1479,12 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
// At the end of this loop, UpperBound is either a clobber, or lower bound
// PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
// We were last killed now by where we got to
if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
LocInfo.AR = None;
MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
MU->setDefiningAccess(VersionStack[UpperBound], true);
LocInfo.LastKill = UpperBound;
} else {
// Otherwise, we checked all the new ones, and now we know we can get to
// LastKill.
MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true);
}
LocInfo.LowerBound = VersionStack.size() - 1;
LocInfo.LowerBoundBlock = BB;
@ -2220,9 +2190,6 @@ void MemoryDef::print(raw_ostream &OS) const {
if (isOptimized()) {
OS << "->";
printID(getOptimized());
if (Optional<AliasResult> AR = getOptimizedAccessType())
OS << " " << *AR;
}
}
@ -2256,9 +2223,6 @@ void MemoryUse::print(raw_ostream &OS) const {
else
OS << LiveOnEntryStr;
OS << ')';
if (Optional<AliasResult> AR = getOptimizedAccessType())
OS << " " << *AR;
}
void MemoryAccess::dump() const {
@ -2609,7 +2573,6 @@ MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
StartingAccess->setOptimized(LiveOnEntry);
StartingAccess->setOptimizedAccessType(None);
return LiveOnEntry;
}
@ -2622,17 +2585,11 @@ MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
// If it is, we will not get a better result.
if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
StartingAccess->setOptimized(DefiningAccess);
StartingAccess->setOptimizedAccessType(None);
return DefiningAccess;
}
OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
StartingAccess->setOptimized(OptimizedAccess);
if (MSSA->isLiveOnEntryDef(OptimizedAccess))
StartingAccess->setOptimizedAccessType(None);
else if (Q.AR && *Q.AR == AliasResult::MustAlias)
StartingAccess->setOptimizedAccessType(
AliasResult(AliasResult::MustAlias));
} else
OptimizedAccess = StartingAccess->getOptimized();


@ -364,9 +364,5 @@ define void @caller_a(double* %arg_a0,
; CHECK-NEXT: 0 mod responses (0.0%)
; CHECK-NEXT: 0 ref responses (0.0%)
; CHECK-NEXT: 140 mod & ref responses (76.0%)
; CHECK-NEXT: 0 must responses (0.0%)
; CHECK-NEXT: 0 must mod responses (0.0%)
; CHECK-NEXT: 0 must ref responses (0.0%)
; CHECK-NEXT: 0 must mod & ref responses (0.0%)
; CHECK-NEXT: Alias Analysis Evaluator Mod/Ref Summary: 23%/0%/0%/76%/0%/0%/0%/0%
; CHECK-NEXT: Alias Analysis Evaluator Mod/Ref Summary: 23%/0%/0%/76%


@ -4,7 +4,7 @@ declare void @llvm.memset.element.unordered.atomic.p0i8.i32(i8*, i8, i64, i32)
define void @test_memset_element_unordered_atomic_const_size(i8* noalias %a) {
; CHECK-LABEL: Function: test_memset_element_unordered_atomic_const_size
; CHECK: Just Mod (MustAlias): Ptr: i8* %a <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
; CHECK: Just Mod: Ptr: i8* %a <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.1 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
; CHECK-NEXT: NoModRef: Ptr: i8* %a.gep.5 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
;
@ -20,7 +20,7 @@ entry:
define void @test_memset_element_unordered_atomic_variable_size(i8* noalias %a, i64 %n) {
; CHECK-LABEL: Function: test_memset_element_unordered_atomic_variable_size
; CHECK: Just Mod (MustAlias): Ptr: i8* %a <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
; CHECK: Just Mod: Ptr: i8* %a <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.1 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.5 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
;


@ -32,12 +32,12 @@ entry:
ret void
}
; CHECK: Just Ref (MustAlias): Ptr: i8* %p <-> call void @readonly_attr(i8* %p)
; CHECK: Just Ref: Ptr: i8* %p <-> call void @readonly_attr(i8* %p)
; CHECK: Just Ref: Ptr: i8* %p <-> call void @readonly_func(i8* %p)
; CHECK: Just Mod (MustAlias): Ptr: i8* %p <-> call void @writeonly_attr(i8* %p)
; CHECK: Just Mod: Ptr: i8* %p <-> call void @writeonly_attr(i8* %p)
; CHECK: Just Mod: Ptr: i8* %p <-> call void @writeonly_func(i8* %p)
; CHECK: NoModRef: Ptr: i8* %p <-> call void @readnone_attr(i8* %p)
; CHECK: NoModRef: Ptr: i8* %p <-> call void @readnone_func(i8* %p)
; CHECK: Both ModRef: Ptr: i8* %p <-> call void @read_write(i8* %p, i8* %p, i8* %p)
; CHECK: Just Ref (MustAlias): Ptr: i8* %p <-> call void @func() [ "deopt"(i8* %p) ]
; CHECK: Just Ref: Ptr: i8* %p <-> call void @func() [ "deopt"(i8* %p) ]
; CHECK: Both ModRef: Ptr: i8* %p <-> call void @writeonly_attr(i8* %p) [ "deopt"(i8* %p) ]


@ -205,7 +205,7 @@ define void @test4(i8* %P, i8* noalias %Q) #3 {
; CHECK-LABEL: Function: test4:
; CHECK: NoAlias: i8* %P, i8* %Q
; CHECK: Just Mod (MustAlias): Ptr: i8* %P <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
; CHECK: NoModRef: Ptr: i8* %Q <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i1 false)
; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i1 false)
@ -267,7 +267,7 @@ define void @test6(i8* %P) #3 {
; CHECK-LABEL: Function: test6:
; CHECK: Just Mod (MustAlias): Ptr: i8* %P <-> call void @llvm.memset.p0i8.i64(i8* align 8 %P, i8 -51, i64 32, i1 false)
; CHECK: Just Mod: Ptr: i8* %P <-> call void @llvm.memset.p0i8.i64(i8* align 8 %P, i8 -51, i64 32, i1 false)
; CHECK: Just Ref: Ptr: i8* %P <-> call void @a_readonly_func(i8* %P)
; CHECK: Just Mod: call void @llvm.memset.p0i8.i64(i8* align 8 %P, i8 -51, i64 32, i1 false) <-> call void @a_readonly_func(i8* %P)
; CHECK: Just Ref: call void @a_readonly_func(i8* %P) <-> call void @llvm.memset.p0i8.i64(i8* align 8 %P, i8 -51, i64 32, i1 false)
@ -308,9 +308,9 @@ entry:
; CHECK: NoModRef: Ptr: i8* %p <-> call void @an_inaccessiblememonly_func()
; CHECK: NoModRef: Ptr: i8* %q <-> call void @an_inaccessiblememonly_func()
; CHECK: Both ModRef: Ptr: i8* %p <-> call void @an_inaccessibleorargmemonly_func(i8* %q)
; CHECK: Both ModRef (MustAlias): Ptr: i8* %q <-> call void @an_inaccessibleorargmemonly_func(i8* %q)
; CHECK: Both ModRef: Ptr: i8* %q <-> call void @an_inaccessibleorargmemonly_func(i8* %q)
; CHECK: Both ModRef: Ptr: i8* %p <-> call void @an_argmemonly_func(i8* %q)
; CHECK: Both ModRef (MustAlias): Ptr: i8* %q <-> call void @an_argmemonly_func(i8* %q)
; CHECK: Both ModRef: Ptr: i8* %q <-> call void @an_argmemonly_func(i8* %q)
; CHECK: Just Ref: call void @a_readonly_func(i8* %p) <-> call void @an_inaccessiblememonly_func()
; CHECK: Just Ref: call void @a_readonly_func(i8* %p) <-> call void @an_inaccessibleorargmemonly_func(i8* %q)
; CHECK: Just Ref: call void @a_readonly_func(i8* %p) <-> call void @an_argmemonly_func(i8* %q)
@ -325,11 +325,11 @@ entry:
; CHECK: Both ModRef: call void @an_inaccessibleorargmemonly_func(i8* %q) <-> call void @a_readonly_func(i8* %p)
; CHECK: Both ModRef: call void @an_inaccessibleorargmemonly_func(i8* %q) <-> call void @a_writeonly_func(i8* %q)
; CHECK: Both ModRef: call void @an_inaccessibleorargmemonly_func(i8* %q) <-> call void @an_inaccessiblememonly_func()
; CHECK: Both ModRef (MustAlias): call void @an_inaccessibleorargmemonly_func(i8* %q) <-> call void @an_argmemonly_func(i8* %q)
; CHECK: Both ModRef: call void @an_inaccessibleorargmemonly_func(i8* %q) <-> call void @an_argmemonly_func(i8* %q)
; CHECK: Both ModRef: call void @an_argmemonly_func(i8* %q) <-> call void @a_readonly_func(i8* %p)
; CHECK: Both ModRef: call void @an_argmemonly_func(i8* %q) <-> call void @a_writeonly_func(i8* %q)
; CHECK: NoModRef: call void @an_argmemonly_func(i8* %q) <-> call void @an_inaccessiblememonly_func()
; CHECK: Both ModRef (MustAlias): call void @an_argmemonly_func(i8* %q) <-> call void @an_inaccessibleorargmemonly_func(i8* %q)
; CHECK: Both ModRef: call void @an_argmemonly_func(i8* %q) <-> call void @an_inaccessibleorargmemonly_func(i8* %q)
}
;; test that MustAlias is set for calls when no MayAlias is found.
@ -411,9 +411,9 @@ entry:
; CHECK: NoModRef: Ptr: i8* %p <-> call void @an_inaccessiblememonly_func() #10 [ "unknown"() ]
; CHECK: NoModRef: Ptr: i8* %q <-> call void @an_inaccessiblememonly_func() #10 [ "unknown"() ]
; CHECK: Both ModRef: Ptr: i8* %p <-> call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ]
; CHECK: Both ModRef (MustAlias): Ptr: i8* %q <-> call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ]
; CHECK: Both ModRef: Ptr: i8* %q <-> call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ]
; CHECK: Both ModRef: Ptr: i8* %p <-> call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ]
; CHECK: Both ModRef (MustAlias): Ptr: i8* %q <-> call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ]
; CHECK: Both ModRef: Ptr: i8* %q <-> call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ]
; CHECK: Just Ref: call void @a_readonly_func(i8* %p) #9 [ "unknown"() ] <-> call void @an_inaccessiblememonly_func() #10 [ "unknown"() ]
; CHECK: Just Ref: call void @a_readonly_func(i8* %p) #9 [ "unknown"() ] <-> call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ]
; CHECK: Just Ref: call void @a_readonly_func(i8* %p) #9 [ "unknown"() ] <-> call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ]
@ -422,10 +422,10 @@ entry:
; CHECK: NoModRef: call void @an_inaccessiblememonly_func() #10 [ "unknown"() ] <-> call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ]
; CHECK: Both ModRef: call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ] <-> call void @a_readonly_func(i8* %p) #9 [ "unknown"() ]
; CHECK: Both ModRef: call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ] <-> call void @an_inaccessiblememonly_func() #10 [ "unknown"() ]
; CHECK: Both ModRef (MustAlias): call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ] <-> call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ]
; CHECK: Both ModRef: call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ] <-> call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ]
; CHECK: Both ModRef: call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ] <-> call void @a_readonly_func(i8* %p) #9 [ "unknown"() ]
; CHECK: NoModRef: call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ] <-> call void @an_inaccessiblememonly_func() #10 [ "unknown"() ]
; CHECK: Both ModRef (MustAlias): call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ] <-> call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ]
; CHECK: Both ModRef: call void @an_argmemonly_func(i8* %q) #12 [ "unknown"() ] <-> call void @an_inaccessibleorargmemonly_func(i8* %q) #11 [ "unknown"() ]
}


@ -282,7 +282,7 @@ declare i8* @__memset_chk(i8* writeonly, i32, i64, i64)
; CHECK-LABEL: Function: test_memset_chk_const_size
define i8* @test_memset_chk_const_size(i8* noalias %a, i64 %n) {
; CHECK: Just Mod (MustAlias): Ptr: i8* %a <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 4, i64 %n)
; CHECK: Just Mod: Ptr: i8* %a <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 4, i64 %n)
; CHECK-NEXT: Just Mod: Ptr: i8* %res <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 4, i64 %n)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.1 <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 4, i64 %n)
; CHECK-NEXT: NoModRef: Ptr: i8* %a.gep.5 <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 4, i64 %n)
@ -300,7 +300,7 @@ entry:
define i8* @test_memset_chk_variable_size(i8* noalias %a, i64 %n.1, i64 %n.2) {
; CHECK-LABEL: Function: test_memset_chk_variable_size
; CHECK: Just Mod (MustAlias): Ptr: i8* %a <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 %n.1, i64 %n.2)
; CHECK: Just Mod: Ptr: i8* %a <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 %n.1, i64 %n.2)
; CHECK-NEXT: Just Mod: Ptr: i8* %res <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 %n.1, i64 %n.2)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.1 <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 %n.1, i64 %n.2)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.5 <-> %res = tail call i8* @__memset_chk(i8* %a, i32 0, i64 %n.1, i64 %n.2)


@ -6,7 +6,7 @@ define void @source_clobber(i8* %a, i8* %b) {
; CHECK-LABEL: @source_clobber(
; CHECK-NEXT: ; 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 128, i1 false)
; CHECK-NEXT: ; MemoryUse(1) MayAlias
; CHECK-NEXT: ; MemoryUse(1)
; CHECK-NEXT: [[X:%.*]] = load i8, i8* %b
; CHECK-NEXT: ret void
;


@ -6,7 +6,7 @@ declare i1 @opaque_true(i1) nounwind readonly
define i1 @foo(i32* %ptr, i1 %cond) {
%cond_wide = zext i1 %cond to i32
; CHECK: MemoryUse(liveOnEntry) MayAlias
; CHECK: MemoryUse(liveOnEntry)
; CHECK-NEXT: call i32 bitcast
%cond_hidden_wide = call i32 bitcast (i1 (i1)* @opaque_true to i32 (i32)*)(i32 %cond_wide)
%cond_hidden = trunc i32 %cond_hidden_wide to i1


@ -18,22 +18,22 @@ entry:
; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: store i32 7, i32* %1, align 4
store i32 7, i32* %1, align 4
; NOLIMIT: MemoryUse(3) MustAlias
; NOLIMIT: MemoryUse(3)
; NOLIMIT-NEXT: %2 = load i32, i32* %0, align 4
; LIMIT: MemoryUse(4)
; LIMIT-NEXT: %2 = load i32, i32* %0, align 4
%2 = load i32, i32* %0, align 4
; NOLIMIT: MemoryUse(4) MustAlias
; NOLIMIT: MemoryUse(4)
; NOLIMIT-NEXT: %3 = load i32, i32* %1, align 4
; LIMIT: MemoryUse(4)
; LIMIT-NEXT: %3 = load i32, i32* %1, align 4
%3 = load i32, i32* %1, align 4
; NOLIMIT: MemoryUse(3) MustAlias
; NOLIMIT: MemoryUse(3)
; NOLIMIT-NEXT: %4 = load i32, i32* %0, align 4
; LIMIT: MemoryUse(4)
; LIMIT-NEXT: %4 = load i32, i32* %0, align 4
%4 = load i32, i32* %0, align 4
; NOLIMIT: MemoryUse(4) MustAlias
; NOLIMIT: MemoryUse(4)
; NOLIMIT-NEXT: %5 = load i32, i32* %1, align 4
; LIMIT: MemoryUse(4)
; LIMIT-NEXT: %5 = load i32, i32* %1, align 4


@ -21,7 +21,7 @@ if.then:
if.end:
; CHECK: 3 = MemoryPhi({entry,1},{if.then,2})
; NOLIMIT: MemoryUse(1) MayAlias
; NOLIMIT: MemoryUse(1)
; NOLIMIT-NEXT: load i8, i8* %local, align 1
; LIMIT: MemoryUse(3)
; LIMIT-NEXT: load i8, i8* %local, align 1
@ -64,7 +64,7 @@ phi.1:
; Order matters here; phi.2 needs to come before phi.3, because that's the order
; they're visited in.
; CHECK: 6 = MemoryPhi({phi.2,4},{phi.3,3})
; NOLIMIT: MemoryUse(1) MayAlias
; NOLIMIT: MemoryUse(1)
; NOLIMIT-NEXT: load i8, i8* %local
; LIMIT: MemoryUse(6)
; LIMIT-NEXT: load i8, i8* %local
@ -77,7 +77,7 @@ define void @cross_phi(i8* noalias %p1, i8* noalias %p2) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: store i8 0, i8* %p1
store i8 0, i8* %p1
; NOLIMIT: MemoryUse(1) MustAlias
; NOLIMIT: MemoryUse(1)
; NOLIMIT-NEXT: load i8, i8* %p1
; LIMIT: MemoryUse(1)
; LIMIT-NEXT: load i8, i8* %p1
@ -112,7 +112,7 @@ d:
e:
; 8 = MemoryPhi({c,4},{d,5})
; NOLIMIT: MemoryUse(1) MustAlias
; NOLIMIT: MemoryUse(1)
; NOLIMIT-NEXT: load i8, i8* %p1
; LIMIT: MemoryUse(8)
; LIMIT-NEXT: load i8, i8* %p1
@ -146,7 +146,7 @@ loop.3:
; CHECK: 4 = MemoryDef(7)
; CHECK-NEXT: store i8 2, i8* %p2
store i8 2, i8* %p2
; NOLIMIT: MemoryUse(1) MayAlias
; NOLIMIT: MemoryUse(1)
; NOLIMIT-NEXT: load i8, i8* %p1
; LIMIT: MemoryUse(4)
; LIMIT-NEXT: load i8, i8* %p1
@ -183,7 +183,7 @@ if.end:
; CHECK: 3 = MemoryDef(4)
; CHECK-NEXT: store i8 2, i8* %p2
store i8 2, i8* %p2
; NOLIMIT: MemoryUse(4) MayAlias
; NOLIMIT: MemoryUse(4)
; NOLIMIT-NEXT: load i8, i8* %p1
; LIMIT: MemoryUse(3)
; LIMIT-NEXT: load i8, i8* %p1


@ -5,17 +5,17 @@
; CHECK: store i8 42, i8* %a1
; CHECK: 2 = MemoryDef(1)->liveOnEntry - clobbered by liveOnEntry
; CHECK: store i8 42, i8* %a2
; CHECK: MemoryUse(1) MustAlias - clobbered by 1 = MemoryDef(liveOnEntry)->liveOnEntry
; CHECK: MemoryUse(1) - clobbered by 1 = MemoryDef(liveOnEntry)->liveOnEntry
; CHECK: %l1 = load i8, i8* %a1
; CHECK: MemoryUse(2) MustAlias - clobbered by 2 = MemoryDef(1)->liveOnEntry
; CHECK: MemoryUse(2) - clobbered by 2 = MemoryDef(1)->liveOnEntry
; CHECK: %l2 = load i8, i8* %a2
; CHECK: 3 = MemoryDef(2)->liveOnEntry - clobbered by liveOnEntry
; CHECK: store i8 42, i8* %p
; CHECK: 4 = MemoryDef(3)->3 MustAlias - clobbered by 3 = MemoryDef(2)->liveOnEntry
; CHECK: 4 = MemoryDef(3)->3 - clobbered by 3 = MemoryDef(2)->liveOnEntry
; CHECK: store i8 42, i8* %p
; CHECK: MemoryUse(4) MustAlias - clobbered by 4 = MemoryDef(3)->3 MustAlias
; CHECK: MemoryUse(4) - clobbered by 4 = MemoryDef(3)->3
; CHECK: %p1 = load i8, i8* %p
; CHECK: MemoryUse(4) MustAlias - clobbered by 4 = MemoryDef(3)->3 MustAlias
; CHECK: MemoryUse(4) - clobbered by 4 = MemoryDef(3)->3
; CHECK: %p2 = load i8, i8* %p
define void @test(i8* %p) {


@ -2,7 +2,7 @@
; CHECK-LABEL: define <vscale x 4 x i32> @f(
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK: MemoryUse(1) MustAlias
; CHECK: MemoryUse(1)
define <vscale x 4 x i32> @f(<vscale x 4 x i32> %z) {
%a = alloca <vscale x 4 x i32>
store <vscale x 4 x i32> %z, <vscale x 4 x i32>* %a
@ -12,7 +12,7 @@ define <vscale x 4 x i32> @f(<vscale x 4 x i32> %z) {
; CHECK-LABEL: define i32 @g(
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK: MemoryUse(1) MayAlias
; CHECK: MemoryUse(1)
declare i32* @gg(<vscale x 4 x i32>* %a)
define i32 @g(i32 %z, i32 *%bb) {
%a = alloca <vscale x 4 x i32>


@ -1003,50 +1003,6 @@ TEST_F(MemorySSATest, RemovingDefInvalidatesCache) {
<< "(DefX1 = " << DefX1 << ")";
}
// Test Must alias for optimized uses
TEST_F(MemorySSATest, TestLoadMustAlias) {
F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
GlobalValue::ExternalLinkage, "F", &M);
B.SetInsertPoint(BasicBlock::Create(C, "", F));
Type *Int8 = Type::getInt8Ty(C);
Value *AllocaA = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "A");
Value *AllocaB = B.CreateAlloca(Int8, ConstantInt::get(Int8, 1), "B");
B.CreateStore(ConstantInt::get(Int8, 1), AllocaB);
// Check load from LOE
LoadInst *LA1 = B.CreateLoad(Int8, AllocaA, "");
// Check load alias cached for second load
LoadInst *LA2 = B.CreateLoad(Int8, AllocaA, "");
B.CreateStore(ConstantInt::get(Int8, 1), AllocaA);
// Check load from store/def
LoadInst *LA3 = B.CreateLoad(Int8, AllocaA, "");
// Check load alias cached for second load
LoadInst *LA4 = B.CreateLoad(Int8, AllocaA, "");
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MSSA.ensureOptimizedUses();
unsigned I = 0;
for (LoadInst *V : {LA1, LA2}) {
MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemUse->getOptimizedAccessType(), None)
<< "Load " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
for (LoadInst *V : {LA3, LA4}) {
MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
EXPECT_EQ(*MemUse->getOptimizedAccessType(), AliasResult::MustAlias)
<< "Load " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
}
// Test Must alias for optimized defs.
TEST_F(MemorySSATest, TestStoreMustAlias) {
F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
@ -1071,9 +1027,6 @@ TEST_F(MemorySSATest, TestStoreMustAlias) {
MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemDef->isOptimized(), false)
<< "Store " << I << " is optimized from the start?";
EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
<< "Store " << I
<< " has correct alias information before being optimized?";
if (V == SA1)
Walker->getClobberingMemoryAccess(V);
else {
@ -1084,55 +1037,6 @@ TEST_F(MemorySSATest, TestStoreMustAlias) {
}
EXPECT_EQ(MemDef->isOptimized(), true)
<< "Store " << I << " was not optimized";
if (I == 0 || I == 1)
EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
<< "Store " << I << " doesn't have the correct alias information";
else
EXPECT_EQ(*MemDef->getOptimizedAccessType(), AliasResult::MustAlias)
<< "Store " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
}
// Test May alias for optimized uses.
TEST_F(MemorySSATest, TestLoadMayAlias) {
F = Function::Create(FunctionType::get(B.getVoidTy(),
{B.getInt8PtrTy(), B.getInt8PtrTy()},
false),
GlobalValue::ExternalLinkage, "F", &M);
B.SetInsertPoint(BasicBlock::Create(C, "", F));
Type *Int8 = Type::getInt8Ty(C);
auto *ArgIt = F->arg_begin();
Argument *PointerA = &*ArgIt;
Argument *PointerB = &*(++ArgIt);
B.CreateStore(ConstantInt::get(Int8, 1), PointerB);
LoadInst *LA1 = B.CreateLoad(Int8, PointerA, "");
B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
LoadInst *LB1 = B.CreateLoad(Int8, PointerB, "");
B.CreateStore(ConstantInt::get(Int8, 0), PointerA);
LoadInst *LA2 = B.CreateLoad(Int8, PointerA, "");
B.CreateStore(ConstantInt::get(Int8, 0), PointerB);
LoadInst *LB2 = B.CreateLoad(Int8, PointerB, "");
setupAnalyses();
MemorySSA &MSSA = *Analyses->MSSA;
MSSA.ensureOptimizedUses();
unsigned I = 0;
for (LoadInst *V : {LA1, LB1}) {
MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
EXPECT_EQ(*MemUse->getOptimizedAccessType(), AliasResult::MayAlias)
<< "Load " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
}
for (LoadInst *V : {LA2, LB2}) {
MemoryUse *MemUse = dyn_cast_or_null<MemoryUse>(MSSA.getMemoryAccess(V));
EXPECT_EQ(*MemUse->getOptimizedAccessType(), AliasResult::MustAlias)
<< "Load " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;
@ -1176,9 +1080,6 @@ TEST_F(MemorySSATest, TestStoreMayAlias) {
MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemDef->isOptimized(), false)
<< "Store " << I << " is optimized from the start?";
EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
<< "Store " << I
<< " has correct alias information before being optimized?";
++I;
}
@ -1190,16 +1091,6 @@ TEST_F(MemorySSATest, TestStoreMayAlias) {
MemoryDef *MemDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(V));
EXPECT_EQ(MemDef->isOptimized(), true)
<< "Store " << I << " was not optimized";
if (I == 1 || I == 3 || I == 4)
EXPECT_EQ(MemDef->getOptimizedAccessType().value(), AliasResult::MayAlias)
<< "Store " << I << " doesn't have the correct alias information";
else if (I == 0 || I == 2)
EXPECT_EQ(MemDef->getOptimizedAccessType(), None)
<< "Store " << I << " doesn't have the correct alias information";
else
EXPECT_EQ(MemDef->getOptimizedAccessType().value(),
AliasResult::MustAlias)
<< "Store " << I << " doesn't have the correct alias information";
// EXPECT_EQ expands such that if we increment I above, it won't get
// incremented except when we try to print the error message.
++I;