[Attributor] Keep loads feeding in `llvm.assume` if stores stay

If a load is only used by an `llvm.assume` and the stores feeding into
the load are not removable, keep the load.
This commit is contained in:
Johannes Doerfert 2022-03-15 12:32:28 -05:00
parent 6847081160
commit c42aa1be74
5 changed files with 186 additions and 121 deletions

View File

@ -204,18 +204,20 @@ bool getAssumedUnderlyingObjects(Attributor &A, const Value &Ptr,
/// Collect all potential values \p LI could read into \p PotentialValues. That
/// is, the only values read by \p LI are assumed to be known and all are in
/// \p PotentialValues. Dependences onto \p QueryingAA are properly tracked,
/// \p UsedAssumedInformation will inform the caller if assumed information was
/// \p PotentialValues. \p PotentialValueOrigins will contain all the
/// instructions that might have put a potential value into \p PotentialValues.
/// Dependences onto \p QueryingAA are properly tracked, \p
/// UsedAssumedInformation will inform the caller if assumed information was
/// used.
///
/// \returns True if the assumed potential copies are all in \p PotentialValues,
/// false if something went wrong and the copies could not be
/// determined.
bool getPotentiallyLoadedValues(Attributor &A, LoadInst &LI,
SmallSetVector<Value *, 4> &PotentialValues,
const AbstractAttribute &QueryingAA,
bool &UsedAssumedInformation,
bool OnlyExact = false);
bool getPotentiallyLoadedValues(
Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
bool OnlyExact = false);
/// Collect all potential values of the one stored by \p SI into
/// \p PotentialCopies. That is, the only copies that were made via the
@ -1057,6 +1059,10 @@ struct InformationCache {
return FI.CalledViaMustTail || FI.ContainsMustTailCall;
}
/// Return true if \p I was determined to be used only by `llvm.assume`
/// calls, i.e., it is contained in the AssumeOnlyValues set populated
/// during initializeInformationCache.
bool isOnlyUsedByAssume(const Instruction &I) const {
return AssumeOnlyValues.contains(&I);
}
/// Return the analysis result from a pass \p AP for function \p F.
template <typename AP>
typename AP::Result *getAnalysisResultForFunction(const Function &F) {
@ -1149,6 +1155,9 @@ private:
/// A map with knowledge retained in `llvm.assume` instructions.
RetainedKnowledgeMap KnowledgeMap;
/// A container for all instructions that are only used by `llvm.assume`.
SetVector<const Instruction *> AssumeOnlyValues;
/// Getters for analysis.
AnalysisGetter &AG;
@ -3412,6 +3421,12 @@ protected:
/// Returns true if \p I is known dead.
virtual bool isKnownDead(const Instruction *I) const = 0;
/// Return true if the underlying value is a store that is known to be
/// removable. This is different from dead stores as the removable store
/// can have an effect on live values, especially loads, but that effect
/// is propagated which allows us to remove the store in turn.
virtual bool isRemovableStore() const { return false; }
/// This method is used to check if at least one instruction in a collection
/// of instructions is live.
template <typename T> bool isLiveInstSet(T begin, T end) const {

View File

@ -322,11 +322,11 @@ AA::combineOptionalValuesInAAValueLatice(const Optional<Value *> &A,
}
template <bool IsLoad, typename Ty>
static bool
getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I,
SmallSetVector<Value *, 4> &PotentialCopies,
const AbstractAttribute &QueryingAA,
bool &UsedAssumedInformation, bool OnlyExact) {
static bool getPotentialCopiesOfMemoryValue(
Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
bool OnlyExact) {
LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
<< " (only exact: " << OnlyExact << ")\n";);
@ -344,6 +344,7 @@ getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I,
// dependences and potential copies in the provided container.
SmallVector<const AAPointerInfo *> PIs;
SmallVector<Value *> NewCopies;
SmallVector<Instruction *> NewCopyOrigins;
const auto *TLI =
A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
@ -383,6 +384,7 @@ getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I,
if (!InitialValue)
return false;
NewCopies.push_back(InitialValue);
NewCopyOrigins.push_back(nullptr);
}
auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
@ -400,6 +402,7 @@ getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I,
assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
if (!Acc.isWrittenValueUnknown()) {
NewCopies.push_back(Acc.getWrittenValue());
NewCopyOrigins.push_back(Acc.getRemoteInst());
return true;
}
auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
@ -410,6 +413,7 @@ getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I,
return false;
}
NewCopies.push_back(SI->getValueOperand());
NewCopyOrigins.push_back(SI);
} else {
assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
@ -445,25 +449,29 @@ getPotentialCopiesOfMemoryValue(Attributor &A, Ty &I,
A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
}
PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
return true;
}
bool AA::getPotentiallyLoadedValues(Attributor &A, LoadInst &LI,
SmallSetVector<Value *, 4> &PotentialValues,
const AbstractAttribute &QueryingAA,
bool &UsedAssumedInformation,
bool OnlyExact) {
bool AA::getPotentiallyLoadedValues(
Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
bool OnlyExact) {
return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
A, LI, PotentialValues, QueryingAA, UsedAssumedInformation, OnlyExact);
A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
UsedAssumedInformation, OnlyExact);
}
bool AA::getPotentialCopiesOfStoredValue(
Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
bool OnlyExact) {
SmallSetVector<Instruction *, 4> PotentialValueOrigins;
return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
A, SI, PotentialCopies, QueryingAA, UsedAssumedInformation, OnlyExact);
A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
UsedAssumedInformation, OnlyExact);
}
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
@ -1152,6 +1160,19 @@ bool Attributor::isAssumedDead(const Use &U,
BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
} else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
const IRPosition IRP = IRPosition::inst(*SI);
const AAIsDead &IsDeadAA =
getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
if (IsDeadAA.isRemovableStore()) {
if (QueryingAA)
recordDependence(IsDeadAA, *QueryingAA, DepClass);
if (!IsDeadAA.isKnown(AAIsDead::IS_REMOVABLE))
UsedAssumedInformation = true;
return true;
}
}
}
return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
@ -2655,6 +2676,30 @@ void InformationCache::initializeInformationCache(const Function &CF,
// queried by abstract attributes during their initialization or update.
// This has to happen before we create attributes.
DenseMap<const Value *, Optional<short>> AssumeUsesMap;
// Add \p V to the assume uses map which tracks the number of uses outside of
// "visited" assumes. If no outside uses are left the value is added to the
// assume only use vector.
auto AddToAssumeUsesMap = [&](const Value &V) -> void {
SmallVector<const Instruction *> Worklist;
if (auto *I = dyn_cast<Instruction>(&V))
Worklist.push_back(I);
while (!Worklist.empty()) {
const Instruction *I = Worklist.pop_back_val();
// Lazily seed the counter with the total number of uses of \p I.
Optional<short> &NumUses = AssumeUsesMap[I];
if (!NumUses.hasValue())
NumUses = I->getNumUses();
// Account for the (assume-reachable) use that led us here.
NumUses = NumUses.getValue() - /* this assume */ 1;
if (NumUses.getValue() != 0)
continue;
// All uses are inside assumes; record \p I and recurse into its
// instruction operands as their use by \p I is now assume-only too.
AssumeOnlyValues.insert(I);
for (const Value *Op : I->operands())
if (auto *OpI = dyn_cast<Instruction>(Op))
Worklist.push_back(OpI);
}
};
for (Instruction &I : instructions(&F)) {
bool IsInterestingOpcode = false;
@ -2675,6 +2720,7 @@ void InformationCache::initializeInformationCache(const Function &CF,
// For `must-tail` calls we remember the caller and callee.
if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
fillMapFromAssume(*Assume, KnowledgeMap);
AddToAssumeUsesMap(*Assume->getArgOperand(0));
} else if (cast<CallInst>(I).isMustTailCall()) {
FI.ContainsMustTailCall = true;
if (const Function *Callee = cast<CallInst>(I).getCalledFunction())

View File

@ -409,9 +409,11 @@ static bool genericValueTraversal(
// will simply end up here again. The load is as far as we can make it.
if (LI->getPointerOperand() != InitialV) {
SmallSetVector<Value *, 4> PotentialCopies;
if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies, QueryingAA,
UsedAssumedInformation,
/* OnlyExact */ true)) {
SmallSetVector<Instruction *, 4> PotentialValueOrigins;
if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
PotentialValueOrigins, QueryingAA,
UsedAssumedInformation,
/* OnlyExact */ true)) {
// Values have to be dynamically unique or we lose the fact that a
// single llvm::Value might represent two runtime values (e.g., stack
// locations in different recursive calls).
@ -421,9 +423,9 @@ static bool genericValueTraversal(
});
if (DynamicallyUnique &&
(!Intraprocedural || !CtxI ||
llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
return AA::isValidInScope(*PC, CtxI->getFunction());
}))) {
llvm::all_of(PotentialCopies, [CtxI](Value *PC) {
return AA::isValidInScope(*PC, CtxI->getFunction());
}))) {
for (auto *PotentialCopy : PotentialCopies)
Worklist.push_back({PotentialCopy, CtxI});
continue;
@ -3611,6 +3613,10 @@ struct AAIsDeadFloating : public AAIsDeadValueImpl {
return ChangeStatus::UNCHANGED;
}
/// See AAIsDead::isRemovableStore(). The store is removable while it is
/// assumed IS_REMOVABLE and the associated value is in fact a StoreInst.
bool isRemovableStore() const override {
return isAssumed(IS_REMOVABLE) && isa<StoreInst>(&getAssociatedValue());
}
/// See AbstractAttribute::manifest(...).
ChangeStatus manifest(Attributor &A) override {
Value &V = getAssociatedValue();
@ -5670,6 +5676,36 @@ struct AAValueSimplifyFloating : AAValueSimplifyImpl {
ChangeStatus updateImpl(Attributor &A) override {
auto Before = SimplifiedAssociatedValue;
// Do not simplify loads that are only used in llvm.assume if we cannot also
// remove all stores that may feed into the load. The reason is that the
// assume is probably worth something as long as the stores are around.
if (auto *LI = dyn_cast<LoadInst>(&getAssociatedValue())) {
InformationCache &InfoCache = A.getInfoCache();
if (InfoCache.isOnlyUsedByAssume(*LI)) {
SmallSetVector<Value *, 4> PotentialCopies;
SmallSetVector<Instruction *, 4> PotentialValueOrigins;
bool UsedAssumedInformation = false;
if (AA::getPotentiallyLoadedValues(A, *LI, PotentialCopies,
PotentialValueOrigins, *this,
UsedAssumedInformation,
/* OnlyExact */ true)) {
if (!llvm::all_of(PotentialValueOrigins, [&](Instruction *I) {
if (!I)
return true;
if (auto *SI = dyn_cast<StoreInst>(I))
return A.isAssumedDead(SI->getOperandUse(0), this,
/* LivenessAA */ nullptr,
UsedAssumedInformation,
/* CheckBBLivenessOnly */ false);
return A.isAssumedDead(*I, this, /* LivenessAA */ nullptr,
UsedAssumedInformation,
/* CheckBBLivenessOnly */ false);
}))
return indicatePessimisticFixpoint();
}
}
}
auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
bool Stripped) -> bool {
auto &AA = A.getAAFor<AAValueSimplify>(

View File

@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals
; RUN: opt -attributor -enable-new-pm=0 -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=4 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_NPM,NOT_CGSCC_OPM,NOT_TUNIT_NPM,IS__TUNIT____,IS________OPM,IS__TUNIT_OPM
; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=4 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_OPM,NOT_CGSCC_NPM,NOT_TUNIT_OPM,IS__TUNIT____,IS________NPM,IS__TUNIT_NPM
; RUN: opt -attributor -enable-new-pm=0 -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=6 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_NPM,NOT_CGSCC_OPM,NOT_TUNIT_NPM,IS__TUNIT____,IS________OPM,IS__TUNIT_OPM
; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=6 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_OPM,NOT_CGSCC_NPM,NOT_TUNIT_OPM,IS__TUNIT____,IS________NPM,IS__TUNIT_NPM
; RUN: opt -attributor-cgscc -enable-new-pm=0 -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_NPM,IS__CGSCC____,IS________OPM,IS__CGSCC_OPM
; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_OPM,IS__CGSCC____,IS________NPM,IS__CGSCC_NPM

View File

@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-attributes --check-globals
; RUN: opt -attributor -enable-new-pm=0 -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=3 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_NPM,NOT_CGSCC_OPM,NOT_TUNIT_NPM,IS__TUNIT____,IS________OPM,IS__TUNIT_OPM
; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=3 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_OPM,NOT_CGSCC_NPM,NOT_TUNIT_OPM,IS__TUNIT____,IS________NPM,IS__TUNIT_NPM
; RUN: opt -attributor -enable-new-pm=0 -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=4 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_NPM,NOT_CGSCC_OPM,NOT_TUNIT_NPM,IS__TUNIT____,IS________OPM,IS__TUNIT_OPM
; RUN: opt -aa-pipeline=basic-aa -passes=attributor -attributor-manifest-internal -attributor-max-iterations-verify -attributor-annotate-decl-cs -attributor-max-iterations=4 -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_CGSCC_OPM,NOT_CGSCC_NPM,NOT_TUNIT_OPM,IS__TUNIT____,IS________NPM,IS__TUNIT_NPM
; RUN: opt -attributor-cgscc -enable-new-pm=0 -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_NPM,IS__CGSCC____,IS________OPM,IS__CGSCC_OPM
; RUN: opt -aa-pipeline=basic-aa -passes=attributor-cgscc -attributor-manifest-internal -attributor-annotate-decl-cs -S < %s | FileCheck %s --check-prefixes=CHECK,NOT_TUNIT_NPM,NOT_TUNIT_OPM,NOT_CGSCC_OPM,IS__CGSCC____,IS________NPM,IS__CGSCC_NPM
@ -244,7 +244,8 @@ define void @assume_1b_nr(i1 %arg, i1 %cond) norecurse {
; CHECK-SAME: (i1 [[ARG:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[STACK:%.*]] = alloca i1, align 1
; CHECK-NEXT: store i1 [[ARG]], i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[ARG]]) #[[ATTR4]]
; CHECK-NEXT: [[L:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L]]) #[[ATTR4]]
; CHECK-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: br label [[M:%.*]]
@ -340,7 +341,8 @@ define i1 @assume_3_nr(i1 %arg, i1 %cond) norecurse {
; CHECK-SAME: (i1 [[ARG:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[STACK:%.*]] = alloca i1, align 1
; CHECK-NEXT: store i1 [[ARG]], i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[ARG]]) #[[ATTR4]]
; CHECK-NEXT: [[L:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L]]) #[[ATTR4]]
; CHECK-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: store i1 true, i1* [[STACK]], align 1
@ -404,49 +406,29 @@ m:
}
define i1 @assume_5_nr(i1 %arg, i1 %cond) norecurse {
; IS________OPM: Function Attrs: inaccessiblememonly nofree norecurse nosync nounwind willreturn
; IS________OPM-LABEL: define {{[^@]+}}@assume_5_nr
; IS________OPM-SAME: (i1 [[ARG:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; IS________OPM-NEXT: [[STACK:%.*]] = alloca i1, align 1
; IS________OPM-NEXT: store i1 [[ARG]], i1* [[STACK]], align 1
; IS________OPM-NEXT: call void @llvm.assume(i1 noundef [[ARG]]) #[[ATTR4]]
; IS________OPM-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; IS________OPM: t:
; IS________OPM-NEXT: store i1 true, i1* [[STACK]], align 1
; IS________OPM-NEXT: [[L2:%.*]] = load i1, i1* [[STACK]], align 1
; IS________OPM-NEXT: call void @llvm.assume(i1 noundef [[L2]]) #[[ATTR4]]
; IS________OPM-NEXT: br label [[M:%.*]]
; IS________OPM: f:
; IS________OPM-NEXT: store i1 false, i1* [[STACK]], align 1
; IS________OPM-NEXT: [[L3:%.*]] = load i1, i1* [[STACK]], align 1
; IS________OPM-NEXT: call void @llvm.assume(i1 noundef [[L3]]) #[[ATTR4]]
; IS________OPM-NEXT: br label [[M]]
; IS________OPM: m:
; IS________OPM-NEXT: [[L4:%.*]] = load i1, i1* [[STACK]], align 1
; IS________OPM-NEXT: call void @llvm.assume(i1 noundef [[L4]]) #[[ATTR4]]
; IS________OPM-NEXT: [[R:%.*]] = call i1 @readI1p(i1* noalias nocapture nofree noundef nonnull readonly dereferenceable(1) [[STACK]]) #[[ATTR5]]
; IS________OPM-NEXT: ret i1 [[R]]
;
; IS________NPM: Function Attrs: inaccessiblememonly nofree norecurse nosync nounwind willreturn
; IS________NPM-LABEL: define {{[^@]+}}@assume_5_nr
; IS________NPM-SAME: (i1 [[ARG:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; IS________NPM-NEXT: [[STACK:%.*]] = alloca i1, align 1
; IS________NPM-NEXT: store i1 [[ARG]], i1* [[STACK]], align 1
; IS________NPM-NEXT: call void @llvm.assume(i1 noundef [[ARG]]) #[[ATTR4]]
; IS________NPM-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; IS________NPM: t:
; IS________NPM-NEXT: store i1 true, i1* [[STACK]], align 1
; IS________NPM-NEXT: call void @llvm.assume(i1 noundef true) #[[ATTR4]]
; IS________NPM-NEXT: br label [[M:%.*]]
; IS________NPM: f:
; IS________NPM-NEXT: store i1 false, i1* [[STACK]], align 1
; IS________NPM-NEXT: call void @llvm.assume(i1 noundef false) #[[ATTR4]]
; IS________NPM-NEXT: br label [[M]]
; IS________NPM: m:
; IS________NPM-NEXT: [[L4:%.*]] = load i1, i1* [[STACK]], align 1
; IS________NPM-NEXT: call void @llvm.assume(i1 noundef [[L4]]) #[[ATTR4]]
; IS________NPM-NEXT: [[R:%.*]] = call i1 @readI1p(i1* noalias nocapture nofree noundef nonnull readonly dereferenceable(1) [[STACK]]) #[[ATTR5]]
; IS________NPM-NEXT: ret i1 [[R]]
; CHECK: Function Attrs: inaccessiblememonly nofree norecurse nosync nounwind willreturn
; CHECK-LABEL: define {{[^@]+}}@assume_5_nr
; CHECK-SAME: (i1 [[ARG:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[STACK:%.*]] = alloca i1, align 1
; CHECK-NEXT: store i1 [[ARG]], i1* [[STACK]], align 1
; CHECK-NEXT: [[L1:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L1]]) #[[ATTR4]]
; CHECK-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: store i1 true, i1* [[STACK]], align 1
; CHECK-NEXT: [[L2:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L2]]) #[[ATTR4]]
; CHECK-NEXT: br label [[M:%.*]]
; CHECK: f:
; CHECK-NEXT: store i1 false, i1* [[STACK]], align 1
; CHECK-NEXT: [[L3:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L3]]) #[[ATTR4]]
; CHECK-NEXT: br label [[M]]
; CHECK: m:
; CHECK-NEXT: [[L4:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L4]]) #[[ATTR4]]
; CHECK-NEXT: [[R:%.*]] = call i1 @readI1p(i1* noalias nocapture nofree noundef nonnull readonly dereferenceable(1) [[STACK]]) #[[ATTR5]]
; CHECK-NEXT: ret i1 [[R]]
;
%stack = alloca i1
store i1 %arg, i1* %stack
@ -471,48 +453,29 @@ m:
}
define i1 @assume_5c_nr(i1 %cond) norecurse {
; IS________OPM: Function Attrs: inaccessiblememonly nofree norecurse nosync nounwind willreturn
; IS________OPM-LABEL: define {{[^@]+}}@assume_5c_nr
; IS________OPM-SAME: (i1 [[COND:%.*]]) #[[ATTR3]] {
; IS________OPM-NEXT: [[STACK:%.*]] = alloca i1, align 1
; IS________OPM-NEXT: store i1 true, i1* [[STACK]], align 1
; IS________OPM-NEXT: call void @llvm.assume(i1 noundef true) #[[ATTR4]]
; IS________OPM-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; IS________OPM: t:
; IS________OPM-NEXT: store i1 true, i1* [[STACK]], align 1
; IS________OPM-NEXT: call void @llvm.assume(i1 noundef true) #[[ATTR4]]
; IS________OPM-NEXT: br label [[M:%.*]]
; IS________OPM: f:
; IS________OPM-NEXT: store i1 false, i1* [[STACK]], align 1
; IS________OPM-NEXT: [[L3:%.*]] = load i1, i1* [[STACK]], align 1
; IS________OPM-NEXT: call void @llvm.assume(i1 noundef [[L3]]) #[[ATTR4]]
; IS________OPM-NEXT: br label [[M]]
; IS________OPM: m:
; IS________OPM-NEXT: [[L4:%.*]] = load i1, i1* [[STACK]], align 1
; IS________OPM-NEXT: call void @llvm.assume(i1 noundef [[L4]]) #[[ATTR4]]
; IS________OPM-NEXT: [[R:%.*]] = call i1 @readI1p(i1* noalias nocapture nofree noundef nonnull readonly dereferenceable(1) [[STACK]]) #[[ATTR5]]
; IS________OPM-NEXT: ret i1 [[R]]
;
; IS________NPM: Function Attrs: inaccessiblememonly nofree norecurse nosync nounwind willreturn
; IS________NPM-LABEL: define {{[^@]+}}@assume_5c_nr
; IS________NPM-SAME: (i1 [[COND:%.*]]) #[[ATTR3]] {
; IS________NPM-NEXT: [[STACK:%.*]] = alloca i1, align 1
; IS________NPM-NEXT: store i1 true, i1* [[STACK]], align 1
; IS________NPM-NEXT: call void @llvm.assume(i1 noundef true) #[[ATTR4]]
; IS________NPM-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; IS________NPM: t:
; IS________NPM-NEXT: store i1 true, i1* [[STACK]], align 1
; IS________NPM-NEXT: call void @llvm.assume(i1 noundef true) #[[ATTR4]]
; IS________NPM-NEXT: br label [[M:%.*]]
; IS________NPM: f:
; IS________NPM-NEXT: store i1 false, i1* [[STACK]], align 1
; IS________NPM-NEXT: call void @llvm.assume(i1 noundef false) #[[ATTR4]]
; IS________NPM-NEXT: br label [[M]]
; IS________NPM: m:
; IS________NPM-NEXT: [[L4:%.*]] = load i1, i1* [[STACK]], align 1
; IS________NPM-NEXT: call void @llvm.assume(i1 noundef [[L4]]) #[[ATTR4]]
; IS________NPM-NEXT: [[R:%.*]] = call i1 @readI1p(i1* noalias nocapture nofree noundef nonnull readonly dereferenceable(1) [[STACK]]) #[[ATTR5]]
; IS________NPM-NEXT: ret i1 [[R]]
; CHECK: Function Attrs: inaccessiblememonly nofree norecurse nosync nounwind willreturn
; CHECK-LABEL: define {{[^@]+}}@assume_5c_nr
; CHECK-SAME: (i1 [[COND:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[STACK:%.*]] = alloca i1, align 1
; CHECK-NEXT: store i1 true, i1* [[STACK]], align 1
; CHECK-NEXT: [[L1:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L1]]) #[[ATTR4]]
; CHECK-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: store i1 true, i1* [[STACK]], align 1
; CHECK-NEXT: [[L2:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L2]]) #[[ATTR4]]
; CHECK-NEXT: br label [[M:%.*]]
; CHECK: f:
; CHECK-NEXT: store i1 false, i1* [[STACK]], align 1
; CHECK-NEXT: [[L3:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L3]]) #[[ATTR4]]
; CHECK-NEXT: br label [[M]]
; CHECK: m:
; CHECK-NEXT: [[L4:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L4]]) #[[ATTR4]]
; CHECK-NEXT: [[R:%.*]] = call i1 @readI1p(i1* noalias nocapture nofree noundef nonnull readonly dereferenceable(1) [[STACK]]) #[[ATTR5]]
; CHECK-NEXT: ret i1 [[R]]
;
%stack = alloca i1
store i1 true, i1* %stack
@ -750,7 +713,8 @@ define void @assume_1b(i1 %arg, i1 %cond) {
; CHECK-SAME: (i1 [[ARG:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[STACK:%.*]] = alloca i1, align 1
; CHECK-NEXT: store i1 [[ARG]], i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[ARG]]) #[[ATTR4]]
; CHECK-NEXT: [[L:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L]]) #[[ATTR4]]
; CHECK-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: br label [[M:%.*]]
@ -846,7 +810,8 @@ define i1 @assume_3(i1 %arg, i1 %cond) {
; CHECK-SAME: (i1 [[ARG:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[STACK:%.*]] = alloca i1, align 1
; CHECK-NEXT: store i1 [[ARG]], i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[ARG]]) #[[ATTR4]]
; CHECK-NEXT: [[L:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L]]) #[[ATTR4]]
; CHECK-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: store i1 true, i1* [[STACK]], align 1
@ -915,7 +880,8 @@ define i1 @assume_5(i1 %arg, i1 %cond) {
; CHECK-SAME: (i1 [[ARG:%.*]], i1 [[COND:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[STACK:%.*]] = alloca i1, align 1
; CHECK-NEXT: store i1 [[ARG]], i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[ARG]]) #[[ATTR4]]
; CHECK-NEXT: [[L1:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L1]]) #[[ATTR4]]
; CHECK-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: store i1 true, i1* [[STACK]], align 1
@ -961,11 +927,13 @@ define i1 @assume_5c(i1 %cond) {
; CHECK-SAME: (i1 [[COND:%.*]]) #[[ATTR3]] {
; CHECK-NEXT: [[STACK:%.*]] = alloca i1, align 1
; CHECK-NEXT: store i1 true, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef true) #[[ATTR4]]
; CHECK-NEXT: [[L1:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L1]]) #[[ATTR4]]
; CHECK-NEXT: br i1 [[COND]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: store i1 true, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef true) #[[ATTR4]]
; CHECK-NEXT: [[L2:%.*]] = load i1, i1* [[STACK]], align 1
; CHECK-NEXT: call void @llvm.assume(i1 noundef [[L2]]) #[[ATTR4]]
; CHECK-NEXT: br label [[M:%.*]]
; CHECK: f:
; CHECK-NEXT: store i1 false, i1* [[STACK]], align 1