[C++11] Add range based accessors for the Use-Def chain of a Value.
This requires a number of steps:

  1) Move value_use_iterator into the Value class as an implementation
     detail.
  2) Change it to actually be a *Use* iterator rather than a *User*
     iterator.
  3) Add an adaptor which is a User iterator that always looks through the
     Use to the User.
  4) Wrap these in Value::use_iterator and Value::user_iterator typedefs.
  5) Add the range adaptors as Value::uses() and Value::users().
  6) Update *all* of the callers to correctly distinguish between whether
     they wanted a use_iterator (and to explicitly dig out the User when
     needed), or a user_iterator which makes the Use itself totally opaque.

Because #6 requires churning essentially everything that walked the Use-Def
chains, I went ahead and added all of the range adaptors and switched them to
range-based loops where appropriate. Also, because the renaming requires
churning essentially every line of code, it didn't make any sense to split
these up into multiple commits -- all of which would touch all of the same
lines of code.

The result is still not quite optimal. The Value::use_iterator is a nice
regular iterator, but Value::user_iterator is an iterator over User*s rather
than over the User objects themselves. As a consequence, it fits a bit
awkwardly into the range-based world and it has the weird extra-dereferencing
'operator->' that so many of our iterators have. I think this could be fixed
by providing something which transforms a range of T&s into a range of T*s,
but that *can* be separated into another patch, and it isn't yet 100% clear
whether this is the right move. However, this change gets us most of the
benefit and cleans up a substantial amount of code around Use and User. =]

llvm-svn: 203364
parent c980afc578
commit cdf4788401
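As a reader's aid (not part of the commit), here is a minimal before/after
sketch of the split the message describes; V stands for an arbitrary Value*,
and the loop bodies are illustrative:

    // Before: use_iterator dereferenced to the User, and the Use had to be
    // dug out of the iterator itself.
    for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) {
      User *Usr = *UI;
      Use &TheUse = UI.getUse();
    }

    // After: the two concepts are distinct, and each has a range adaptor.
    for (Use &U : V->uses()) {
      User *Usr = U.getUser(); // explicit step from the Use to its User
      (void)Usr;
    }
    for (User *Usr : V->users())
      (void)Usr;               // the Use itself is opaque here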
@@ -44,10 +44,10 @@ public:
   typedef typename super::reference reference;

   PredIterator() {}
-  explicit inline PredIterator(Ptr *bb) : It(bb->use_begin()) {
+  explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
     advancePastNonTerminators();
   }
-  inline PredIterator(Ptr *bb, bool) : It(bb->use_end()) {}
+  inline PredIterator(Ptr *bb, bool) : It(bb->user_end()) {}

   inline bool operator==(const Self& x) const { return It == x.It; }
   inline bool operator!=(const Self& x) const { return !operator==(x); }

@@ -81,9 +81,9 @@ public:
   }
 };

-typedef PredIterator<BasicBlock, Value::use_iterator> pred_iterator;
+typedef PredIterator<BasicBlock, Value::user_iterator> pred_iterator;
 typedef PredIterator<const BasicBlock,
-                     Value::const_use_iterator> const_pred_iterator;
+                     Value::const_user_iterator> const_pred_iterator;

 inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
 inline const_pred_iterator pred_begin(const BasicBlock *BB) {

@@ -103,11 +103,13 @@ public:
   /// isCallee - Determine whether the passed iterator points to the
   /// callee operand's Use.
   ///
-  bool isCallee(Value::const_use_iterator UI) const {
-    return getCallee() == &UI.getUse();
+  bool isCallee(Value::const_user_iterator UI) const {
+    return isCallee(&UI.getUse());
   }

+  /// Determine whether this Use is the callee operand's Use.
+  bool isCallee(const Use *U) const { return getCallee() == U; }
+
   ValTy *getArgument(unsigned ArgNo) const {
     assert(arg_begin() + ArgNo < arg_end() && "Argument # out of range!");
     return *(arg_begin() + ArgNo);

@@ -121,11 +123,17 @@ public:
   /// Given a value use iterator, returns the argument that corresponds to it.
   /// Iterator must actually correspond to an argument.
-  unsigned getArgumentNo(Value::const_use_iterator I) const {
+  unsigned getArgumentNo(Value::const_user_iterator I) const {
+    return getArgumentNo(&I.getUse());
+  }
+
+  /// Given a use for an argument, get the argument number that corresponds to
+  /// it.
+  unsigned getArgumentNo(const Use *U) const {
     assert(getInstruction() && "Not a call or invoke instruction!");
-    assert(arg_begin() <= &I.getUse() && &I.getUse() < arg_end()
+    assert(arg_begin() <= U && U < arg_end()
            && "Argument # out of range!");
-    return &I.getUse() - arg_begin();
+    return U - arg_begin();
   }

   /// arg_iterator - The type of iterator to use when looping over actual
@@ -45,10 +45,10 @@ public:
   // Out of line virtual method, so the vtable, etc has a home.
   ~Instruction();

-  /// use_back - Specialize the methods defined in Value, as we know that an
+  /// user_back - Specialize the methods defined in Value, as we know that an
   /// instruction can only be used by other instructions.
-  Instruction *use_back() { return cast<Instruction>(*use_begin());}
-  const Instruction *use_back() const { return cast<Instruction>(*use_begin());}
+  Instruction *user_back() { return cast<Instruction>(*user_begin());}
+  const Instruction *user_back() const { return cast<Instruction>(*user_begin());}

   inline const BasicBlock *getParent() const { return Parent; }
   inline BasicBlock *getParent() { return Parent; }

@@ -2100,7 +2100,7 @@ public:
   /// getIncomingBlock - Return incoming basic block corresponding
   /// to value use iterator.
   ///
-  BasicBlock *getIncomingBlock(Value::const_use_iterator I) const {
+  BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
     return getIncomingBlock(I.getUse());
   }
@@ -15,6 +15,7 @@
 #define LLVM_IR_VALUE_H

 #include "llvm-c/Core.h"
+#include "llvm/ADT/iterator_range.h"
 #include "llvm/IR/Use.h"
 #include "llvm/Support/CBindingWrapping.h"
 #include "llvm/Support/Casting.h"

@@ -75,13 +76,54 @@ protected:
   unsigned char SubclassOptionalData : 7;

 private:
+  template <typename UseT> // UseT == 'Use' or 'const Use'
+  class use_iterator_impl
+      : public std::iterator<std::forward_iterator_tag, UseT *, ptrdiff_t> {
+    typedef std::iterator<std::forward_iterator_tag, UseT *, ptrdiff_t> super;
+
+    UseT *U;
+    explicit use_iterator_impl(UseT *u) : U(u) {}
+    friend class Value;
+
+  public:
+    typedef typename super::reference reference;
+    typedef typename super::pointer pointer;
+
+    use_iterator_impl() : U() {}
+
+    bool operator==(const use_iterator_impl &x) const { return U == x.U; }
+    bool operator!=(const use_iterator_impl &x) const { return !operator==(x); }
+
+    use_iterator_impl &operator++() { // Preincrement
+      assert(U && "Cannot increment end iterator!");
+      U = U->getNext();
+      return *this;
+    }
+    use_iterator_impl operator++(int) { // Postincrement
+      auto tmp = *this;
+      ++*this;
+      return tmp;
+    }
+
+    UseT &operator*() const {
+      assert(U && "Cannot dereference end iterator!");
+      return *U;
+    }
+
+    UseT *operator->() const { return &operator*(); }
+
+    operator use_iterator_impl<const UseT>() const {
+      return use_iterator_impl<const UseT>(U);
+    }
+  };
+
   template <typename UserTy> // UserTy == 'User' or 'const User'
   class user_iterator_impl
       : public std::iterator<std::forward_iterator_tag, UserTy *, ptrdiff_t> {
     typedef std::iterator<std::forward_iterator_tag, UserTy *, ptrdiff_t> super;

-    Use *U;
-    explicit user_iterator_impl(Use *u) : U(u) {}
+    use_iterator_impl<Use> UI;
+    explicit user_iterator_impl(Use *U) : UI(U) {}
     friend class Value;

   public:

@@ -90,16 +132,14 @@ private:
     user_iterator_impl() {}

-    bool operator==(const user_iterator_impl &x) const { return U == x.U; }
+    bool operator==(const user_iterator_impl &x) const { return UI == x.UI; }
     bool operator!=(const user_iterator_impl &x) const { return !operator==(x); }

-    /// \brief Returns true if this iterator is equal to use_end() on the value.
-    bool atEnd() const { return U == 0; }
+    /// \brief Returns true if this iterator is equal to user_end() on the value.
+    bool atEnd() const { return *this == user_iterator_impl(); }

     // Iterator traversal: forward iteration only
     user_iterator_impl &operator++() { // Preincrement
-      assert(U && "Cannot increment end iterator!");
-      U = U->getNext();
+      ++UI;
       return *this;
     }
     user_iterator_impl operator++(int) { // Postincrement

@@ -110,21 +150,20 @@ private:
     // Retrieve a pointer to the current User.
     UserTy *operator*() const {
-      assert(U && "Cannot dereference end iterator!");
-      return U->getUser();
+      return UI->getUser();
     }

     UserTy *operator->() const { return operator*(); }

     operator user_iterator_impl<const UserTy>() const {
-      return user_iterator_impl<const UserTy>(U);
+      return user_iterator_impl<const UserTy>(*UI);
     }

-    Use &getUse() const { return *U; }
+    Use &getUse() const { return *UI; }

     /// \brief Return the operand # of this use in its User.
     /// FIXME: Replace all callers with a direct call to Use::getOperandNo.
-    unsigned getOperandNo() const { return U->getOperandNo(); }
+    unsigned getOperandNo() const { return UI->getOperandNo(); }
   };

   /// SubclassData - This member is defined by this class, but is not used for

@@ -205,14 +244,33 @@ public:
   //
   bool use_empty() const { return UseList == 0; }

-  typedef user_iterator_impl<User> use_iterator;
-  typedef user_iterator_impl<const User> const_use_iterator;
+  typedef use_iterator_impl<Use> use_iterator;
+  typedef use_iterator_impl<const Use> const_use_iterator;
   use_iterator use_begin() { return use_iterator(UseList); }
   const_use_iterator use_begin() const { return const_use_iterator(UseList); }
-  use_iterator use_end() { return use_iterator(0); }
-  const_use_iterator use_end() const { return const_use_iterator(0); }
-  User *use_back() { return *use_begin(); }
-  const User *use_back() const { return *use_begin(); }
+  use_iterator use_end() { return use_iterator(); }
+  const_use_iterator use_end() const { return const_use_iterator(); }
+  iterator_range<use_iterator> uses() {
+    return iterator_range<use_iterator>(use_begin(), use_end());
+  }
+  iterator_range<const_use_iterator> uses() const {
+    return iterator_range<const_use_iterator>(use_begin(), use_end());
+  }
+
+  typedef user_iterator_impl<User> user_iterator;
+  typedef user_iterator_impl<const User> const_user_iterator;
+  user_iterator user_begin() { return user_iterator(UseList); }
+  const_user_iterator user_begin() const { return const_user_iterator(UseList); }
+  user_iterator user_end() { return user_iterator(); }
+  const_user_iterator user_end() const { return const_user_iterator(); }
+  User *user_back() { return *user_begin(); }
+  const User *user_back() const { return *user_begin(); }
+  iterator_range<user_iterator> users() {
+    return iterator_range<user_iterator>(user_begin(), user_end());
+  }
+  iterator_range<const_user_iterator> users() const {
+    return iterator_range<const_user_iterator>(user_begin(), user_end());
+  }

   /// hasOneUse - Return true if there is exactly one user of this value. This
   /// is specialized because it is a common request and does not require
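The accessors above compose with both styles; a short sketch of equivalents
(illustrative only, assuming some Value *V):

    // Range-based form over Use objects.
    for (Use &U : V->uses())
      U.getUser()->dump();

    // The begin/end pairs remain available for algorithms wanting iterators.
    unsigned NumUsers = std::distance(V->user_begin(), V->user_end());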
@@ -85,17 +85,15 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
   SmallSet<const Use *, Threshold> Visited;
   int Count = 0;

-  for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
-       UI != UE; ++UI) {
+  for (const Use &U : V->uses()) {
     // If there are lots of uses, conservatively say that the value
     // is captured to avoid taking too much compile time.
     if (Count++ >= Threshold)
       return Tracker->tooManyUses();

-    Use *U = &UI.getUse();
-    if (!Tracker->shouldExplore(U)) continue;
-    Visited.insert(U);
-    Worklist.push_back(U);
+    if (!Tracker->shouldExplore(&U)) continue;
+    Visited.insert(&U);
+    Worklist.push_back(&U);
   }

   while (!Worklist.empty()) {

@@ -148,17 +146,15 @@ void llvm::PointerMayBeCaptured(const Value *V, CaptureTracker *Tracker) {
     case Instruction::AddrSpaceCast:
       // The original value is not captured via this if the new value isn't.
       Count = 0;
-      for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
-           UI != UE; ++UI) {
+      for (Use &UU : I->uses()) {
        // If there are lots of uses, conservatively say that the value
        // is captured to avoid taking too much compile time.
        if (Count++ >= Threshold)
          return Tracker->tooManyUses();

-        Use *U = &UI.getUse();
-        if (Visited.insert(U))
-          if (Tracker->shouldExplore(U))
-            Worklist.push_back(U);
+        if (Visited.insert(&UU))
+          if (Tracker->shouldExplore(&UU))
+            Worklist.push_back(&UU);
       }
       break;
     case Instruction::ICmp:
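The rewrite above relies on each Use living inside its User's operand list, so
the address of the range element is stable and can be stashed in a worklist. A
sketch of the pattern, with V assumed to be the pointer being traced:

    SmallVector<const Use *, 20> Worklist;
    for (const Use &U : V->uses())
      Worklist.push_back(&U); // &U points into the using instruction's operands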
@@ -252,33 +252,33 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,
                                          GlobalValue *OkayStoreDest) {
   if (!V->getType()->isPointerTy()) return true;

-  for (Value::use_iterator UI = V->use_begin(), E=V->use_end(); UI != E; ++UI) {
-    User *U = *UI;
-    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
+  for (Use &U : V->uses()) {
+    User *I = U.getUser();
+    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
       Readers.push_back(LI->getParent()->getParent());
-    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
+    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
       if (V == SI->getOperand(1)) {
         Writers.push_back(SI->getParent()->getParent());
       } else if (SI->getOperand(1) != OkayStoreDest) {
         return true; // Storing the pointer
       }
-    } else if (Operator::getOpcode(U) == Instruction::GetElementPtr) {
-      if (AnalyzeUsesOfPointer(U, Readers, Writers))
+    } else if (Operator::getOpcode(I) == Instruction::GetElementPtr) {
+      if (AnalyzeUsesOfPointer(I, Readers, Writers))
         return true;
-    } else if (Operator::getOpcode(U) == Instruction::BitCast) {
-      if (AnalyzeUsesOfPointer(U, Readers, Writers, OkayStoreDest))
+    } else if (Operator::getOpcode(I) == Instruction::BitCast) {
+      if (AnalyzeUsesOfPointer(I, Readers, Writers, OkayStoreDest))
         return true;
-    } else if (CallSite CS = U) {
+    } else if (CallSite CS = I) {
       // Make sure that this is just the function being called, not that it is
       // passing into the function.
-      if (!CS.isCallee(UI)) {
+      if (!CS.isCallee(&U)) {
        // Detect calls to free.
-        if (isFreeCall(U, TLI))
+        if (isFreeCall(I, TLI))
          Writers.push_back(CS->getParent()->getParent());
        else
          return true; // Argument of an unknown call.
       }
-    } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(U)) {
+    } else if (ICmpInst *ICI = dyn_cast<ICmpInst>(I)) {
       if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
         return true; // Allow comparison against null.
     } else {

@@ -303,8 +303,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
   // Walk the user list of the global. If we find anything other than a direct
   // load or store, bail out.
-  for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
-    User *U = *I;
+  for (User *U : GV->users()) {
     if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
       // The pointer loaded from the global can only be used in simple ways:
       // we allow addressing of it and loading storing to it. We do *not* allow
@@ -1052,9 +1052,8 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
   Function *Caller = CS.getInstruction()->getParent()->getParent();
   // Check if the caller function is recursive itself.
-  for (Value::use_iterator U = Caller->use_begin(), E = Caller->use_end();
-       U != E; ++U) {
-    CallSite Site(cast<Value>(*U));
+  for (User *U : Caller->users()) {
+    CallSite Site(U);
     if (!Site)
       continue;
     Instruction *I = Site.getInstruction();

@@ -142,9 +142,8 @@ bool IVUsers::AddUsersImpl(Instruction *I,
     return false;

   SmallPtrSet<Instruction *, 4> UniqueUsers;
-  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
-       UI != E; ++UI) {
-    Instruction *User = cast<Instruction>(*UI);
+  for (Use &U : I->uses()) {
+    Instruction *User = cast<Instruction>(U.getUser());
     if (!UniqueUsers.insert(User))
       continue;

@@ -157,7 +156,7 @@ bool IVUsers::AddUsersImpl(Instruction *I,
     BasicBlock *UseBB = User->getParent();
     // A phi's use is live out of its predecessor block.
     if (PHINode *PHI = dyn_cast<PHINode>(User)) {
-      unsigned OperandNo = UI.getOperandNo();
+      unsigned OperandNo = U.getOperandNo();
       unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
       UseBB = PHI->getIncomingBlock(ValNo);
     }
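The IVUsers hunk shows the idiom for mapping a Use of a PHI back to the
predecessor block it is live out of; as a standalone helper it might look like
the following sketch (the helper name is an assumption; the two PHINode calls
are the APIs used above):

    static BasicBlock *incomingBlockForUse(PHINode *PHI, const Use &U) {
      unsigned ValNo = PHINode::getIncomingValueNumForOperand(U.getOperandNo());
      return PHI->getIncomingBlock(ValNo);
    }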
@@ -3200,10 +3200,9 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
   // If we have an explicit value to collapse to, do that round of the
   // simplification loop by hand initially.
   if (SimpleV) {
-    for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
-         ++UI)
-      if (*UI != I)
-        Worklist.insert(cast<Instruction>(*UI));
+    for (User *U : I->users())
+      if (U != I)
+        Worklist.insert(cast<Instruction>(U));

     // Replace the instruction with its simplified value.
     I->replaceAllUsesWith(SimpleV);

@@ -3230,9 +3229,8 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
   // Stash away all the uses of the old instruction so we can check them for
   // recursive simplifications after a RAUW. This is cheaper than checking all
   // uses of To on the recursive step in most cases.
-  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
-       ++UI)
-    Worklist.insert(cast<Instruction>(*UI));
+  for (User *U : I->users())
+    Worklist.insert(cast<Instruction>(U));

   // Replace the instruction with its simplified value.
   I->replaceAllUsesWith(SimpleV);
@@ -179,12 +179,11 @@ bool Loop::isLCSSAForm(DominatorTree &DT) const {
   for (block_iterator BI = block_begin(), E = block_end(); BI != E; ++BI) {
     BasicBlock *BB = *BI;
     for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;++I)
-      for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
-           ++UI) {
-        User *U = *UI;
-        BasicBlock *UserBB = cast<Instruction>(U)->getParent();
-        if (PHINode *P = dyn_cast<PHINode>(U))
-          UserBB = P->getIncomingBlock(UI);
+      for (Use &U : I->uses()) {
+        Instruction *UI = cast<Instruction>(U.getUser());
+        BasicBlock *UserBB = UI->getParent();
+        if (PHINode *P = dyn_cast<PHINode>(UI))
+          UserBB = P->getIncomingBlock(U);

         // Check the current block, as a fast-path, before checking whether
         // the use is anywhere in the loop. Most values are used in the same
@@ -261,8 +261,8 @@ PointerType *llvm::getMallocType(const CallInst *CI,
   unsigned NumOfBitCastUses = 0;

   // Determine if CallInst has a bitcast use.
-  for (Value::const_use_iterator UI = CI->use_begin(), E = CI->use_end();
-       UI != E; )
+  for (Value::const_user_iterator UI = CI->user_begin(), E = CI->user_end();
+       UI != E;)
     if (const BitCastInst *BCI = dyn_cast<BitCastInst>(*UI++)) {
       MallocType = cast<PointerType>(BCI->getDestTy());
       NumOfBitCastUses++;
@@ -202,9 +202,8 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
     // Otherwise we have to see if a casted version of the incoming pointer
     // is available. If so, we can use it, otherwise we have to fail.
-    for (Value::use_iterator UI = PHIIn->use_begin(), E = PHIIn->use_end();
-         UI != E; ++UI) {
-      if (CastInst *CastI = dyn_cast<CastInst>(*UI))
+    for (User *U : PHIIn->users()) {
+      if (CastInst *CastI = dyn_cast<CastInst>(U))
         if (CastI->getOpcode() == Cast->getOpcode() &&
             CastI->getType() == Cast->getType() &&
             (!DT || DT->dominates(CastI->getParent(), PredBB)))

@@ -238,9 +237,8 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
     // Scan to see if we have this GEP available.
     Value *APHIOp = GEPOps[0];
-    for (Value::use_iterator UI = APHIOp->use_begin(), E = APHIOp->use_end();
-         UI != E; ++UI) {
-      if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(*UI))
+    for (User *U : APHIOp->users()) {
+      if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U))
         if (GEPI->getType() == GEP->getType() &&
             GEPI->getNumOperands() == GEPOps.size() &&
             GEPI->getParent()->getParent() == CurBB->getParent() &&

@@ -297,9 +295,8 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
       return Inst;

     // Otherwise, see if we have this add available somewhere.
-    for (Value::use_iterator UI = LHS->use_begin(), E = LHS->use_end();
-         UI != E; ++UI) {
-      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(*UI))
+    for (User *U : LHS->users()) {
+      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U))
         if (BO->getOpcode() == Instruction::Add &&
             BO->getOperand(0) == LHS && BO->getOperand(1) == RHS &&
             BO->getParent()->getParent() == CurBB->getParent() &&
@@ -16,11 +16,10 @@
 using namespace llvm;

 void detail::PtrUseVisitorBase::enqueueUsers(Instruction &I) {
-  for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
-       UI != UE; ++UI) {
-    if (VisitedUses.insert(&UI.getUse())) {
+  for (Use &U : I.uses()) {
+    if (VisitedUses.insert(&U)) {
       UseToVisit NewU = {
-        UseToVisit::UseAndIsOffsetKnownPair(&UI.getUse(), IsOffsetKnown),
+        UseToVisit::UseAndIsOffsetKnownPair(&U, IsOffsetKnown),
         Offset
       };
       Worklist.push_back(std::move(NewU));
@@ -3033,9 +3033,8 @@ static void
 PushDefUseChildren(Instruction *I,
                    SmallVectorImpl<Instruction *> &Worklist) {
   // Push the def-use children onto the Worklist stack.
-  for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
-       UI != UE; ++UI)
-    Worklist.push_back(cast<Instruction>(*UI));
+  for (User *U : I->users())
+    Worklist.push_back(cast<Instruction>(U));
 }

 /// ForgetSymbolicValue - This looks up computed SCEV values for all

@@ -7334,11 +7333,8 @@ void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
   // so that future queries will recompute the expressions using the new
   // value.
   Value *Old = getValPtr();
-  SmallVector<User *, 16> Worklist;
+  SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
   SmallPtrSet<User *, 8> Visited;
-  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
-       UI != UE; ++UI)
-    Worklist.push_back(*UI);
   while (!Worklist.empty()) {
     User *U = Worklist.pop_back_val();
     // Deleting the Old value will cause this to dangle. Postpone

@@ -7350,9 +7346,7 @@ void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
     if (PHINode *PN = dyn_cast<PHINode>(U))
       SE->ConstantEvolutionLoopExitValue.erase(PN);
     SE->ValueExprMap.erase(U);
-    for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
-         UI != UE; ++UI)
-      Worklist.push_back(*UI);
+    Worklist.insert(Worklist.end(), U->user_begin(), U->user_end());
   }
   // Delete the Old value.
   if (PHINode *PN = dyn_cast<PHINode>(Old))
@@ -47,9 +47,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
   Instruction *Ret = NULL;

   // Check to see if there is already a cast!
-  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
-       UI != E; ++UI) {
-    User *U = *UI;
+  for (User *U : V->users())
     if (U->getType() == Ty)
       if (CastInst *CI = dyn_cast<CastInst>(U))
         if (CI->getOpcode() == Op) {

@@ -69,7 +67,6 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
           Ret = CI;
           break;
         }
-  }

   // Create a new cast.
   if (!Ret)
@@ -303,11 +303,10 @@ void SparseSolver::Solve(Function &F) {
     // "I" got into the work list because it made a transition. See if any
     // users are both live and in need of updating.
-    for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
-         UI != E; ++UI) {
-      Instruction *U = cast<Instruction>(*UI);
-      if (BBExecutable.count(U->getParent())) // Inst is executable?
-        visitInst(*U);
+    for (User *U : I->users()) {
+      Instruction *UI = cast<Instruction>(U);
+      if (BBExecutable.count(UI->getParent())) // Inst is executable?
+        visitInst(*UI);
     }
   }
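Several hunks in this patch (ScalarEvolution above; BitcodeWriter and
SjLjEHPrepare further down) replace a manual push_back loop with the
two-iterator container constructor; a sketch, assuming Old is some Value*:

    SmallVector<User *, 16> Worklist(Old->user_begin(), Old->user_end());
    // ...equivalent to, but terser than:
    // for (User *U : Old->users())
    //   Worklist.push_back(U);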
@@ -1960,9 +1960,8 @@ llvm::GetUnderlyingObjects(Value *V,
 /// are lifetime markers.
 ///
 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
-  for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
-       UI != UE; ++UI) {
-    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI);
+  for (const User *U : V->users()) {
+    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
     if (!II) return false;

     if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
@@ -332,7 +332,7 @@ void BitcodeReaderValueList::ResolveConstantForwardRefs() {
     // new value. If they reference more than one placeholder, update them all
     // at once.
     while (!Placeholder->use_empty()) {
-      Value::use_iterator UI = Placeholder->use_begin();
+      auto UI = Placeholder->user_begin();
       User *U = *UI;

       // If the using object isn't uniqued, just update the operands. This

@@ -3116,8 +3116,8 @@ error_code BitcodeReader::Materialize(GlobalValue *GV) {
   for (UpgradedIntrinsicMap::iterator I = UpgradedIntrinsics.begin(),
        E = UpgradedIntrinsics.end(); I != E; ++I) {
     if (I->first != I->second) {
-      for (Value::use_iterator UI = I->first->use_begin(),
-           UE = I->first->use_end(); UI != UE; ) {
+      for (auto UI = I->first->user_begin(), UE = I->first->user_end();
+           UI != UE;) {
        if (CallInst* CI = dyn_cast<CallInst>(*UI++))
          UpgradeIntrinsicCall(CI, I->second);
      }

@@ -3172,8 +3172,8 @@ error_code BitcodeReader::MaterializeModule(Module *M) {
   for (std::vector<std::pair<Function*, Function*> >::iterator I =
        UpgradedIntrinsics.begin(), E = UpgradedIntrinsics.end(); I != E; ++I) {
     if (I->first != I->second) {
-      for (Value::use_iterator UI = I->first->use_begin(),
-           UE = I->first->use_end(); UI != UE; ) {
+      for (auto UI = I->first->user_begin(), UE = I->first->user_end();
+           UI != UE;) {
        if (CallInst* CI = dyn_cast<CallInst>(*UI++))
          UpgradeIntrinsicCall(CI, I->second);
      }
@@ -1807,17 +1807,10 @@ static void WriteUseList(const Value *V, const ValueEnumerator &VE,
     return;

   // Make a copy of the in-memory use-list for sorting.
-  unsigned UseListSize = std::distance(V->use_begin(), V->use_end());
-  SmallVector<const User*, 8> UseList;
-  UseList.reserve(UseListSize);
-  for (Value::const_use_iterator I = V->use_begin(), E = V->use_end();
-       I != E; ++I) {
-    const User *U = *I;
-    UseList.push_back(U);
-  }
+  SmallVector<const User*, 8> UserList(V->user_begin(), V->user_end());

   // Sort the copy based on the order read by the BitcodeReader.
-  std::sort(UseList.begin(), UseList.end(), bitcodereader_order);
+  std::sort(UserList.begin(), UserList.end(), bitcodereader_order);

   // TODO: Generate a diff between the BitcodeWriter in-memory use-list and the
   // sorted list (i.e., the expected BitcodeReader in-memory use-list).
@@ -159,12 +159,11 @@ void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map,
       V->dump();

     OS << " Uses(" << std::distance(V->use_begin(),V->use_end()) << "):";
-    for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
-         UI != UE; ++UI) {
-      if (UI != V->use_begin())
+    for (const Use &U : V->uses()) {
+      if (&U != &*V->use_begin())
         OS << ",";
-      if((*UI)->hasName())
-        OS << " " << (*UI)->getName();
+      if(U->hasName())
+        OS << " " << U->getName();
       else
         OS << " [null]";
@@ -336,16 +336,15 @@ bool CodeGenPrepare::CanMergeBlocks(const BasicBlock *BB,
   // don't mess around with them.
   BasicBlock::const_iterator BBI = BB->begin();
   while (const PHINode *PN = dyn_cast<PHINode>(BBI++)) {
-    for (Value::const_use_iterator UI = PN->use_begin(), E = PN->use_end();
-         UI != E; ++UI) {
-      const Instruction *User = cast<Instruction>(*UI);
-      if (User->getParent() != DestBB || !isa<PHINode>(User))
+    for (const User *U : PN->users()) {
+      const Instruction *UI = cast<Instruction>(U);
+      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
         return false;
       // If User is inside DestBB block and it is a PHINode then check
       // incoming value. If incoming value is not from BB then this is
       // a complex condition (e.g. preheaders) we want to avoid here.
-      if (User->getParent() == DestBB) {
-        if (const PHINode *UPN = dyn_cast<PHINode>(User))
+      if (UI->getParent() == DestBB) {
+        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
           for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
             Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
             if (Insn && Insn->getParent() == BB &&

@@ -474,7 +473,7 @@ static bool SinkCast(CastInst *CI) {
   DenseMap<BasicBlock*, CastInst*> InsertedCasts;

   bool MadeChange = false;
-  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
+  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
        UI != E; ) {
     Use &TheUse = UI.getUse();
     Instruction *User = cast<Instruction>(*UI);

@@ -483,7 +482,7 @@ static bool SinkCast(CastInst *CI) {
     // appropriate predecessor block.
     BasicBlock *UserBB = User->getParent();
     if (PHINode *PN = dyn_cast<PHINode>(User)) {
-      UserBB = PN->getIncomingBlock(UI);
+      UserBB = PN->getIncomingBlock(TheUse);
     }

     // Preincrement use iterator so we don't invalidate it.

@@ -567,7 +566,7 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
   DenseMap<BasicBlock*, CmpInst*> InsertedCmps;

   bool MadeChange = false;
-  for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
+  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
        UI != E; ) {
     Use &TheUse = UI.getUse();
     Instruction *User = cast<Instruction>(*UI);

@@ -1143,11 +1142,9 @@ class TypePromotionTransaction {
       DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
                    << "\n");
       // Record the original uses.
-      for (Value::use_iterator UseIt = Inst->use_begin(),
-                               EndIt = Inst->use_end();
-           UseIt != EndIt; ++UseIt) {
-        Instruction *Use = cast<Instruction>(*UseIt);
-        OriginalUses.push_back(InstructionAndIdx(Use, UseIt.getOperandNo()));
+      for (Use &U : Inst->uses()) {
+        Instruction *UserI = cast<Instruction>(U.getUser());
+        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
       }
       // Now, we can replace the uses.
       Inst->replaceAllUsesWith(New);

@@ -2115,23 +2112,22 @@ static bool FindAllMemoryUses(Instruction *I,
     return true;

   // Loop over all the uses, recursively processing them.
-  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
-       UI != E; ++UI) {
-    User *U = *UI;
+  for (Use &U : I->uses()) {
+    Instruction *UserI = cast<Instruction>(U.getUser());

-    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
-      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
+    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
+      MemoryUses.push_back(std::make_pair(LI, U.getOperandNo()));
       continue;
     }

-    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
-      unsigned opNo = UI.getOperandNo();
+    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
+      unsigned opNo = U.getOperandNo();
       if (opNo == 0) return true; // Storing addr, not into addr.
       MemoryUses.push_back(std::make_pair(SI, opNo));
       continue;
     }

-    if (CallInst *CI = dyn_cast<CallInst>(U)) {
+    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
       InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
       if (!IA) return true;

@@ -2141,8 +2137,7 @@ static bool FindAllMemoryUses(Instruction *I,
       continue;
     }

-    if (FindAllMemoryUses(cast<Instruction>(U), MemoryUses, ConsideredInsts,
-                          TLI))
+    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI))
       return true;
   }

@@ -2603,12 +2598,11 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
     return false;

   bool DefIsLiveOut = false;
-  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
-       UI != E; ++UI) {
-    Instruction *User = cast<Instruction>(*UI);
+  for (User *U : I->users()) {
+    Instruction *UI = cast<Instruction>(U);

     // Figure out which BB this ext is used in.
-    BasicBlock *UserBB = User->getParent();
+    BasicBlock *UserBB = UI->getParent();
     if (UserBB == DefBB) continue;
     DefIsLiveOut = true;
     break;

@@ -2617,14 +2611,13 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
     return false;

   // Make sure none of the uses are PHI nodes.
-  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
-       UI != E; ++UI) {
-    Instruction *User = cast<Instruction>(*UI);
-    BasicBlock *UserBB = User->getParent();
+  for (User *U : Src->users()) {
+    Instruction *UI = cast<Instruction>(U);
+    BasicBlock *UserBB = UI->getParent();
     if (UserBB == DefBB) continue;
     // Be conservative. We don't want this xform to end up introducing
     // reloads just before load / store instructions.
-    if (isa<PHINode>(User) || isa<LoadInst>(User) || isa<StoreInst>(User))
+    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
       return false;
   }

@@ -2632,10 +2625,8 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
   DenseMap<BasicBlock*, Instruction*> InsertedTruncs;

   bool MadeChange = false;
-  for (Value::use_iterator UI = Src->use_begin(), E = Src->use_end();
-       UI != E; ++UI) {
-    Use &TheUse = UI.getUse();
-    Instruction *User = cast<Instruction>(*UI);
+  for (Use &U : Src->uses()) {
+    Instruction *User = cast<Instruction>(U.getUser());

     // Figure out which BB this ext is used in.
     BasicBlock *UserBB = User->getParent();

@@ -2651,7 +2642,7 @@ bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
     }

     // Replace a use of the {s|z}ext source with a use of the result.
-    TheUse = InsertedTrunc;
+    U = InsertedTrunc;
     ++NumExtUses;
     MadeChange = true;
   }

@@ -2779,16 +2770,15 @@ bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
   DenseMap<BasicBlock*, Instruction*> InsertedShuffles;

   bool MadeChange = false;
-  for (Value::use_iterator UI = SVI->use_begin(), E = SVI->use_end();
-       UI != E; ++UI) {
-    Instruction *User = cast<Instruction>(*UI);
+  for (User *U : SVI->users()) {
+    Instruction *UI = cast<Instruction>(U);

     // Figure out which BB this ext is used in.
-    BasicBlock *UserBB = User->getParent();
+    BasicBlock *UserBB = UI->getParent();
     if (UserBB == DefBB) continue;

     // For now only apply this when the splat is used by a shift instruction.
-    if (!User->isShift()) continue;
+    if (!UI->isShift()) continue;

     // Everything checks out, sink the shuffle if the user's block doesn't
     // already have a copy.

@@ -2801,7 +2791,7 @@ bool CodeGenPrepare::OptimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
                                               SVI->getOperand(2), "", InsertPt);
     }

-    User->replaceUsesOfWith(SVI, InsertedShuffle);
+    UI->replaceUsesOfWith(SVI, InsertedShuffle);
     MadeChange = true;
   }
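Note that SinkCast and OptimizeCmpExpression above deliberately keep an
explicit user_iterator rather than switching to a range-based loop: the body
may rewrite the current use, so the iterator is advanced before any mutation.
The shape, sketched with an assumed Instruction *I:

    for (Value::user_iterator UI = I->user_begin(), E = I->user_end(); UI != E;) {
      Use &TheUse = UI.getUse();
      ++UI; // advance first; rewriting TheUse can unlink it from the use list
      // ... possibly rewrite TheUse here ...
    }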
@@ -133,7 +133,7 @@ bool FastISel::hasTrivialKill(const Value *V) const {
          !(I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::PtrToInt ||
            I->getOpcode() == Instruction::IntToPtr) &&
-         cast<Instruction>(*I->use_begin())->getParent() == I->getParent();
+         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
 }

 unsigned FastISel::getRegForValue(const Value *V) {

@@ -1523,7 +1523,7 @@ bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
   // this by scanning the single-use users of the load until we get to FoldInst.
   unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

-  const Instruction *TheUser = LI->use_back();
+  const Instruction *TheUser = LI->user_back();
   while (TheUser != FoldInst && // Scan up until we find FoldInst.
          // Stay in the right block.
          TheUser->getParent() == FoldInst->getParent() &&

@@ -1532,7 +1532,7 @@ bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
     if (!TheUser->hasOneUse())
       return false;

-    TheUser = TheUser->use_back();
+    TheUser = TheUser->user_back();
   }

   // If we didn't find the fold instruction, then we failed to collapse the
@@ -47,12 +47,10 @@ static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
   if (I->use_empty()) return false;
   if (isa<PHINode>(I)) return true;
   const BasicBlock *BB = I->getParent();
-  for (Value::const_use_iterator UI = I->use_begin(), E = I->use_end();
-       UI != E; ++UI) {
-    const User *U = *UI;
+  for (const User *U : I->users())
     if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
       return true;
-  }
+
   return false;
 }

@@ -5580,9 +5580,8 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
 /// IsOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
 /// value is equal or not-equal to zero.
 static bool IsOnlyUsedInZeroEqualityComparison(const Value *V) {
-  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
-       UI != E; ++UI) {
-    if (const ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+  for (const User *U : V->users()) {
+    if (const ICmpInst *IC = dyn_cast<ICmpInst>(U))
       if (IC->isEquality())
         if (const Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
           if (C->isNullValue())

@@ -7326,12 +7325,10 @@ static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
     return A->use_empty();

   const BasicBlock *Entry = A->getParent()->begin();
-  for (Value::const_use_iterator UI = A->use_begin(), E = A->use_end();
-       UI != E; ++UI) {
-    const User *U = *UI;
+  for (const User *U : A->users())
     if (cast<Instruction>(U)->getParent() != Entry || isa<SwitchInst>(U))
       return false; // Use not in entry block.
-  }
+
   return true;
 }
@@ -149,7 +149,7 @@ static void MarkBlocksLiveIn(BasicBlock *BB,
 /// instruction with those returned by the personality function.
 void SjLjEHPrepare::substituteLPadValues(LandingPadInst *LPI, Value *ExnVal,
                                          Value *SelVal) {
-  SmallVector<Value *, 8> UseWorkList(LPI->use_begin(), LPI->use_end());
+  SmallVector<Value *, 8> UseWorkList(LPI->user_begin(), LPI->user_end());
   while (!UseWorkList.empty()) {
     Value *Val = UseWorkList.pop_back_val();
     ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Val);

@@ -294,8 +294,8 @@ void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
       if (Inst->use_empty())
         continue;
       if (Inst->hasOneUse() &&
-          cast<Instruction>(Inst->use_back())->getParent() == BB &&
-          !isa<PHINode>(Inst->use_back()))
+          cast<Instruction>(Inst->user_back())->getParent() == BB &&
+          !isa<PHINode>(Inst->user_back()))
         continue;

       // If this is an alloca in the entry block, it's not a real register

@@ -306,11 +306,10 @@ void SjLjEHPrepare::lowerAcrossUnwindEdges(Function &F,
       // Avoid iterator invalidation by copying users to a temporary vector.
       SmallVector<Instruction *, 16> Users;
-      for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
-           UI != E; ++UI) {
-        Instruction *User = cast<Instruction>(*UI);
-        if (User->getParent() != BB || isa<PHINode>(User))
-          Users.push_back(User);
+      for (User *U : Inst->users()) {
+        Instruction *UI = cast<Instruction>(U);
+        if (UI->getParent() != BB || isa<PHINode>(UI))
+          Users.push_back(UI);
       }

       // Find all of the blocks that this value is live in.
@@ -150,9 +150,7 @@ bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
 }

 bool StackProtector::HasAddressTaken(const Instruction *AI) {
-  for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
-       UI != UE; ++UI) {
-    const User *U = *UI;
+  for (const User *U : AI->users()) {
     if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
       if (AI == SI->getValueOperand())
         return true;

@@ -411,7 +411,7 @@ void llvm::UpgradeCallsToIntrinsic(Function* F) {
   if (UpgradeIntrinsicFunction(F, NewFn)) {
     if (NewFn != F) {
       // Replace all uses to the old function with the new one if necessary.
-      for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
+      for (Value::user_iterator UI = F->user_begin(), UE = F->user_end();
            UI != UE; ) {
         if (CallInst *CI = dyn_cast<CallInst>(*UI++))
           UpgradeIntrinsicCall(CI, NewFn);
@@ -74,7 +74,7 @@ BasicBlock::~BasicBlock() {
     Constant *Replacement =
         ConstantInt::get(llvm::Type::getInt32Ty(getContext()), 1);
     while (!use_empty()) {
-      BlockAddress *BA = cast<BlockAddress>(use_back());
+      BlockAddress *BA = cast<BlockAddress>(user_back());
       BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                        BA->getType()));
       BA->destroyConstant();

@@ -218,7 +218,7 @@ void Constant::destroyConstantImpl() {
   // Constants) that they are, in fact, invalid now and should be deleted.
   //
   while (!use_empty()) {
-    Value *V = use_back();
+    Value *V = user_back();
 #ifndef NDEBUG // Only in -g mode...
     if (!isa<Constant>(V)) {
       dbgs() << "While deleting: " << *this

@@ -230,7 +230,7 @@ void Constant::destroyConstantImpl() {
     cast<Constant>(V)->destroyConstant();

     // The constant should remove itself from our use list...
-    assert((use_empty() || use_back() != V) && "Constant not removed!");
+    assert((use_empty() || user_back() != V) && "Constant not removed!");
   }

   // Value has no outstanding references it is safe to delete it now...

@@ -307,8 +307,8 @@ bool Constant::isThreadDependent() const {
 /// isConstantUsed - Return true if the constant has users other than constant
 /// exprs and other dangling things.
 bool Constant::isConstantUsed() const {
-  for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
-    const Constant *UC = dyn_cast<Constant>(*UI);
+  for (const User *U : users()) {
+    const Constant *UC = dyn_cast<Constant>(U);
     if (UC == 0 || isa<GlobalValue>(UC))
       return true;

@@ -377,7 +377,7 @@ static bool removeDeadUsersOfConstant(const Constant *C) {
   if (isa<GlobalValue>(C)) return false; // Cannot remove this

   while (!C->use_empty()) {
-    const Constant *User = dyn_cast<Constant>(C->use_back());
+    const Constant *User = dyn_cast<Constant>(C->user_back());
     if (!User) return false; // Non-constant usage;
     if (!removeDeadUsersOfConstant(User))
       return false; // Constant wasn't dead

@@ -393,8 +393,8 @@ static bool removeDeadUsersOfConstant(const Constant *C) {
 /// that want to check to see if a global is unused, but don't want to deal
 /// with potentially dead constants hanging off of the globals.
 void Constant::removeDeadConstantUsers() const {
-  Value::const_use_iterator I = use_begin(), E = use_end();
-  Value::const_use_iterator LastNonDeadUser = E;
+  Value::const_user_iterator I = user_begin(), E = user_end();
+  Value::const_user_iterator LastNonDeadUser = E;
   while (I != E) {
     const Constant *User = dyn_cast<Constant>(*I);
     if (User == 0) {

@@ -413,7 +413,7 @@ void Constant::removeDeadConstantUsers() const {
     // If the constant was dead, then the iterator is invalidated.
     if (LastNonDeadUser == E) {
-      I = use_begin();
+      I = user_begin();
       if (I == E) break;
     } else {
       I = LastNonDeadUser;

@@ -514,7 +514,7 @@ LLVMUseRef LLVMGetFirstUse(LLVMValueRef Val) {
   Value::use_iterator I = V->use_begin();
   if (I == V->use_end())
     return 0;
-  return wrap(&(I.getUse()));
+  return wrap(&*I);
 }

 LLVMUseRef LLVMGetNextUse(LLVMUseRef U) {
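The Core.cpp change above works because use_iterator now dereferences to the
Use itself, so its address is exactly what the C API hands out. A sketch,
assuming some Value *V:

    Value::use_iterator I = V->use_begin();
    if (I != V->use_end()) {
      Use *FirstUse = &*I;      // the Use's own address, as LLVMGetFirstUse wraps
      User *Usr = I->getUser(); // operator-> forwards to the Use's members
    }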
@@ -1476,7 +1476,7 @@ bool llvm::StripDebugInfo(Module &M) {
   // the module.
   if (Function *Declare = M.getFunction("llvm.dbg.declare")) {
     while (!Declare->use_empty()) {
-      CallInst *CI = cast<CallInst>(Declare->use_back());
+      CallInst *CI = cast<CallInst>(Declare->user_back());
       CI->eraseFromParent();
     }
     Declare->eraseFromParent();

@@ -1485,7 +1485,7 @@ bool llvm::StripDebugInfo(Module &M) {
   if (Function *DbgVal = M.getFunction("llvm.dbg.value")) {
     while (!DbgVal->use_empty()) {
-      CallInst *CI = cast<CallInst>(DbgVal->use_back());
+      CallInst *CI = cast<CallInst>(DbgVal->user_back());
       CI->eraseFromParent();
     }
     DbgVal->eraseFromParent();
@@ -711,15 +711,15 @@ Function *Intrinsic::getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys) {
 /// hasAddressTaken - returns true if there are any uses of this function
 /// other than direct calls or invokes to it.
 bool Function::hasAddressTaken(const User* *PutOffender) const {
-  for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I) {
-    const User *U = *I;
-    if (isa<BlockAddress>(U))
+  for (const Use &U : uses()) {
+    const User *FU = U.getUser();
+    if (isa<BlockAddress>(FU))
       continue;
-    if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
-      return PutOffender ? (*PutOffender = U, true) : true;
-    ImmutableCallSite CS(cast<Instruction>(U));
-    if (!CS.isCallee(I))
-      return PutOffender ? (*PutOffender = U, true) : true;
+    if (!isa<CallInst>(FU) && !isa<InvokeInst>(FU))
+      return PutOffender ? (*PutOffender = FU, true) : true;
+    ImmutableCallSite CS(cast<Instruction>(FU));
+    if (!CS.isCallee(&U))
+      return PutOffender ? (*PutOffender = FU, true) : true;
   }
   return false;
 }

@@ -731,8 +731,8 @@ bool Function::isDefTriviallyDead() const {
     return false;

   // Check if the function is used by anything other than a blockaddress.
-  for (Value::const_use_iterator I = use_begin(), E = use_end(); I != E; ++I)
-    if (!isa<BlockAddress>(*I))
+  for (const User *U : users())
+    if (!isa<BlockAddress>(U))
       return false;

   return true;
@@ -403,18 +403,18 @@ bool Instruction::isSameOperationAs(const Instruction *I,
 /// specified block. Note that PHI nodes are considered to evaluate their
 /// operands in the corresponding predecessor block.
 bool Instruction::isUsedOutsideOfBlock(const BasicBlock *BB) const {
-  for (const_use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
+  for (const Use &U : uses()) {
     // PHI nodes uses values in the corresponding predecessor block. For other
     // instructions, just check to see whether the parent of the use matches up.
-    const User *U = *UI;
-    const PHINode *PN = dyn_cast<PHINode>(U);
+    const Instruction *I = cast<Instruction>(U.getUser());
+    const PHINode *PN = dyn_cast<PHINode>(I);
     if (PN == 0) {
-      if (cast<Instruction>(U)->getParent() != BB)
+      if (I->getParent() != BB)
         return true;
       continue;
     }

-    if (PN->getIncomingBlock(UI) != BB)
+    if (PN->getIncomingBlock(U) != BB)
       return true;
   }
   return false;

@@ -119,7 +119,7 @@ bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
   // Scan both lists simultaneously until one is exhausted. This limits the
   // search to the shorter list.
   BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
-  const_use_iterator UI = use_begin(), UE = use_end();
+  const_user_iterator UI = user_begin(), UE = user_end();
   for (; BI != BE && UI != UE; ++BI, ++UI) {
     // Scan basic block: Check if this Value is used by the instruction at BI.
     if (std::find(BI->op_begin(), BI->op_end(), this) != BI->op_end())
@@ -1974,9 +1974,8 @@ void Verifier::visitInstruction(Instruction &I) {
   Assert1(BB, "Instruction not embedded in basic block!", &I);

   if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
-    for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
-         UI != UE; ++UI)
-      Assert1(*UI != (User*)&I || !DT.isReachableFromEntry(BB),
+    for (User *U : I.users())
+      Assert1(U != (User*)&I || !DT.isReachableFromEntry(BB),
               "Only PHI nodes may reference their own value!", &I);
   }

@@ -1999,13 +1998,12 @@ void Verifier::visitInstruction(Instruction &I) {
   // Check that all uses of the instruction, if they are instructions
   // themselves, actually have parent basic blocks. If the use is not an
   // instruction, it is an error!
-  for (User::use_iterator UI = I.use_begin(), UE = I.use_end();
-       UI != UE; ++UI) {
-    if (Instruction *Used = dyn_cast<Instruction>(*UI))
+  for (Use &U : I.uses()) {
+    if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
       Assert2(Used->getParent() != 0, "Instruction referencing instruction not"
               " embedded in a basic block!", &I, Used);
     else {
-      CheckFailed("Use of instruction is not an instruction!", *UI);
+      CheckFailed("Use of instruction is not an instruction!", U);
       return;
     }
   }
@@ -60,18 +60,17 @@ bool HexagonRemoveExtendArgs::runOnFunction(Function &F) {
     if (F.getAttributes().hasAttribute(Idx, Attribute::SExt)) {
       Argument* Arg = AI;
       if (!isa<PointerType>(Arg->getType())) {
-        for (Instruction::use_iterator UI = Arg->use_begin();
-             UI != Arg->use_end();) {
+        for (auto UI = Arg->user_begin(); UI != Arg->user_end();) {
           if (isa<SExtInst>(*UI)) {
-            Instruction* Use = cast<Instruction>(*UI);
-            SExtInst* SI = new SExtInst(Arg, Use->getType());
+            Instruction* I = cast<Instruction>(*UI);
+            SExtInst* SI = new SExtInst(Arg, I->getType());
             assert (EVT::getEVT(SI->getType()) ==
-                    (EVT::getEVT(Use->getType())));
+                    (EVT::getEVT(I->getType())));
             ++UI;
-            Use->replaceAllUsesWith(SI);
+            I->replaceAllUsesWith(SI);
             Instruction* First = F.getEntryBlock().begin();
             SI->insertBefore(First);
-            Use->eraseFromParent();
+            I->eraseFromParent();
           } else {
             ++UI;
           }
@@ -699,12 +699,11 @@ static bool usedInGlobalVarDef(const Constant *C) {
       return true;
   }

-  for (Value::const_use_iterator ui = C->use_begin(), ue = C->use_end();
-       ui != ue; ++ui) {
-    const Constant *C = dyn_cast<Constant>(*ui);
-    if (usedInGlobalVarDef(C))
-      return true;
-  }
+  for (const User *U : C->users())
+    if (const Constant *C = dyn_cast<Constant>(U))
+      if (usedInGlobalVarDef(C))
+        return true;
+
   return false;
 }

@@ -730,11 +729,10 @@ static bool usedInOneFunc(const User *U, Function const *&oneFunc) {
        (md->getName().str() == "llvm.dbg.sp")))
     return true;

-  for (User::const_use_iterator ui = U->use_begin(), ue = U->use_end();
-       ui != ue; ++ui) {
-    if (usedInOneFunc(*ui, oneFunc) == false)
+  for (const User *UU : U->users())
+    if (usedInOneFunc(UU, oneFunc) == false)
       return false;
-  }

   return true;
 }

@@ -765,12 +763,11 @@ static bool canDemoteGlobalVar(const GlobalVariable *gv, Function const *&f) {
 static bool useFuncSeen(const Constant *C,
                         llvm::DenseMap<const Function *, bool> &seenMap) {
-  for (Value::const_use_iterator ui = C->use_begin(), ue = C->use_end();
-       ui != ue; ++ui) {
-    if (const Constant *cu = dyn_cast<Constant>(*ui)) {
+  for (const User *U : C->users()) {
+    if (const Constant *cu = dyn_cast<Constant>(U)) {
       if (useFuncSeen(cu, seenMap))
         return true;
-    } else if (const Instruction *I = dyn_cast<Instruction>(*ui)) {
+    } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
       const BasicBlock *bb = I->getParent();
       if (!bb)
         continue;

@@ -797,10 +794,8 @@ void NVPTXAsmPrinter::emitDeclarations(const Module &M, raw_ostream &O) {
       emitDeclaration(F, O);
       continue;
     }
-    for (Value::const_use_iterator iter = F->use_begin(),
-         iterEnd = F->use_end();
-         iter != iterEnd; ++iter) {
-      if (const Constant *C = dyn_cast<Constant>(*iter)) {
+    for (const User *U : F->users()) {
+      if (const Constant *C = dyn_cast<Constant>(U)) {
         if (usedInGlobalVarDef(C)) {
           // The use is in the initialization of a global variable
           // that is a function pointer, so print a declaration

@@ -816,9 +811,9 @@ void NVPTXAsmPrinter::emitDeclarations(const Module &M, raw_ostream &O) {
         }
       }

-      if (!isa<Instruction>(*iter))
+      if (!isa<Instruction>(U))
         continue;
-      const Instruction *instr = cast<Instruction>(*iter);
+      const Instruction *instr = cast<Instruction>(U);
       const BasicBlock *bb = instr->getParent();
       if (!bb)
         continue;
@@ -146,10 +146,8 @@ bool GenericToNVVM::runOnModule(Module &M) {
     // variable initializers, as other uses have been already been removed
     // while walking through the instructions in function definitions.
     for (Value::use_iterator UI = GV->use_begin(), UE = GV->use_end();
-         UI != UE;) {
-      Use &U = (UI++).getUse();
-      U.set(BitCastNewGV);
-    }
+         UI != UE;)
+      (UI++)->set(BitCastNewGV);
     std::string Name = GV->getName();
     GV->removeDeadConstantUsers();
     GV->eraseFromParent();

@@ -123,7 +123,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
       if (DL->getTypeStoreSize(load->getType()) < MaxAggrCopySize)
         continue;

-      User *use = *(load->use_begin());
+      User *use = load->user_back();
       if (StoreInst *store = dyn_cast<StoreInst>(use)) {
         if (store->getOperand(0) != load) //getValueOperand
           continue;

@@ -163,7 +163,7 @@ bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
   //
   for (unsigned i = 0, e = aggrLoads.size(); i != e; ++i) {
     LoadInst *load = aggrLoads[i];
-    StoreInst *store = dyn_cast<StoreInst>(*load->use_begin());
+    StoreInst *store = dyn_cast<StoreInst>(*load->user_begin());
     Value *srcAddr = load->getOperand(0);
     Value *dstAddr = store->getOperand(1);
     unsigned numLoads = DL->getTypeStoreSize(load->getType());

@@ -143,11 +143,9 @@ bool NVVMReflect::runOnModule(Module &M) {
   // ConstantArray can be found successfully, see if it can be
   // found in VarMap. If so, replace the uses of CallInst with the
   // value found in VarMap. If not, replace the use with value 0.
-  for (Value::use_iterator I = ReflectFunction->use_begin(),
-                           E = ReflectFunction->use_end();
-       I != E; ++I) {
-    assert(isa<CallInst>(*I) && "Only a call instruction can use _reflect");
-    CallInst *Reflect = cast<CallInst>(*I);
+  for (User *U : ReflectFunction->users()) {
+    assert(isa<CallInst>(U) && "Only a call instruction can use _reflect");
+    CallInst *Reflect = cast<CallInst>(U);

     assert((Reflect->getNumOperands() == 2) &&
            "Only one operand expect for _reflect function");
@ -127,10 +127,7 @@ createReplacementInstr(ConstantExpr *CE, Instruction *Instr) {
|
|||
|
||||
static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
|
||||
do {
|
||||
SmallVector<WeakVH,8> WUsers;
|
||||
for (Value::use_iterator I = CE->use_begin(), E = CE->use_end();
|
||||
I != E; ++I)
|
||||
WUsers.push_back(WeakVH(*I));
|
||||
SmallVector<WeakVH,8> WUsers(CE->user_begin(), CE->user_end());
|
||||
std::sort(WUsers.begin(), WUsers.end());
|
||||
WUsers.erase(std::unique(WUsers.begin(), WUsers.end()), WUsers.end());
|
||||
while (!WUsers.empty())
|
||||
|
@ -162,9 +159,9 @@ static bool replaceConstantExprOp(ConstantExpr *CE, Pass *P) {
|
|||
|
||||
static bool rewriteNonInstructionUses(GlobalVariable *GV, Pass *P) {
|
||||
SmallVector<WeakVH,8> WUsers;
|
||||
for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I)
|
||||
if (!isa<Instruction>(*I))
|
||||
WUsers.push_back(WeakVH(*I));
|
||||
for (User *U : GV->users())
|
||||
if (!isa<Instruction>(U))
|
||||
WUsers.push_back(WeakVH(U));
|
||||
while (!WUsers.empty())
|
||||
if (WeakVH WU = WUsers.pop_back_val()) {
|
||||
ConstantExpr *CE = dyn_cast<ConstantExpr>(WU);
|
||||
|
@ -203,7 +200,7 @@ bool XCoreLowerThreadLocal::lowerGlobal(GlobalVariable *GV) {
|
|||
GV->isExternallyInitialized());
|
||||
|
||||
// Update uses.
|
||||
SmallVector<User *, 16> Users(GV->use_begin(), GV->use_end());
|
||||
SmallVector<User *, 16> Users(GV->user_begin(), GV->user_end());
|
||||
for (unsigned I = 0, E = Users.size(); I != E; ++I) {
|
||||
User *U = Users[I];
|
||||
Instruction *Inst = cast<Instruction>(U);
|
||||
|
|
|
@ -136,11 +136,10 @@ CallGraphNode *ArgPromotion::PromoteArguments(CallGraphNode *CGN) {
|
|||
// transform functions that have indirect callers. Also see if the function
|
||||
// is self-recursive.
|
||||
bool isSelfRecursive = false;
|
||||
for (Value::use_iterator UI = F->use_begin(), E = F->use_end();
|
||||
UI != E; ++UI) {
|
||||
CallSite CS(*UI);
|
||||
for (Use &U : F->uses()) {
|
||||
CallSite CS(U.getUser());
|
||||
// Must be a direct call.
|
||||
if (CS.getInstruction() == 0 || !CS.isCallee(UI)) return 0;
|
||||
if (CS.getInstruction() == 0 || !CS.isCallee(&U)) return 0;
|
||||
|
||||
if (CS.getInstruction()->getParent()->getParent() == F)
|
||||
isSelfRecursive = true;
|
||||
|
@ -222,9 +221,8 @@ static bool AllCallersPassInValidPointerForArgument(Argument *Arg) {
|
|||
|
||||
// Look at all call sites of the function. At this pointer we know we only
|
||||
// have direct callees.
|
||||
for (Value::use_iterator UI = Callee->use_begin(), E = Callee->use_end();
|
||||
UI != E; ++UI) {
|
||||
CallSite CS(*UI);
|
||||
for (User *U : Callee->users()) {
|
||||
CallSite CS(U);
|
||||
assert(CS && "Should only have direct calls!");
|
||||
|
||||
if (!CS.getArgument(ArgNo)->isDereferenceablePointer())
|
||||
|
@ -375,17 +373,16 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
|
|||
// not (GEP+)loads, or any (GEP+)loads that are not safe to promote.
|
||||
SmallVector<LoadInst*, 16> Loads;
|
||||
IndicesVector Operands;
|
||||
for (Value::use_iterator UI = Arg->use_begin(), E = Arg->use_end();
|
||||
UI != E; ++UI) {
|
||||
User *U = *UI;
|
||||
for (Use &U : Arg->uses()) {
|
||||
User *UR = U.getUser();
|
||||
Operands.clear();
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(UR)) {
|
||||
// Don't hack volatile/atomic loads
|
||||
if (!LI->isSimple()) return false;
|
||||
Loads.push_back(LI);
|
||||
// Direct loads are equivalent to a GEP with a zero index and then a load.
|
||||
Operands.push_back(0);
|
||||
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
|
||||
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UR)) {
|
||||
if (GEP->use_empty()) {
|
||||
// Dead GEP's cause trouble later. Just remove them if we run into
|
||||
// them.
|
||||
|
@ -406,9 +403,8 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
|
|||
return false; // Not a constant operand GEP!
|
||||
|
||||
// Ensure that the only users of the GEP are load instructions.
|
||||
for (Value::use_iterator UI = GEP->use_begin(), E = GEP->use_end();
|
||||
UI != E; ++UI)
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
|
||||
for (User *GEPU : GEP->users())
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(GEPU)) {
|
||||
// Don't hack volatile/atomic loads
|
||||
if (!LI->isSimple()) return false;
|
||||
Loads.push_back(LI);
|
||||
|
@ -554,16 +550,15 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
|
|||
// In this table, we will track which indices are loaded from the argument
|
||||
// (where direct loads are tracked as no indices).
|
||||
ScalarizeTable &ArgIndices = ScalarizedElements[I];
|
||||
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
|
||||
++UI) {
|
||||
Instruction *User = cast<Instruction>(*UI);
|
||||
assert(isa<LoadInst>(User) || isa<GetElementPtrInst>(User));
|
||||
for (User *U : I->users()) {
|
||||
Instruction *UI = cast<Instruction>(U);
|
||||
assert(isa<LoadInst>(UI) || isa<GetElementPtrInst>(UI));
|
||||
IndicesVector Indices;
|
||||
Indices.reserve(User->getNumOperands() - 1);
|
||||
Indices.reserve(UI->getNumOperands() - 1);
|
||||
// Since loads will only have a single operand, and GEPs only a single
|
||||
// non-index operand, this will record direct loads without any indices,
|
||||
// and gep+loads with the GEP indices.
|
||||
for (User::op_iterator II = User->op_begin() + 1, IE = User->op_end();
|
||||
for (User::op_iterator II = UI->op_begin() + 1, IE = UI->op_end();
|
||||
II != IE; ++II)
|
||||
Indices.push_back(cast<ConstantInt>(*II)->getSExtValue());
|
||||
// GEPs with a single 0 index can be merged with direct loads
|
||||
|
@ -571,11 +566,11 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
|
|||
Indices.clear();
|
||||
ArgIndices.insert(Indices);
|
||||
LoadInst *OrigLoad;
|
||||
if (LoadInst *L = dyn_cast<LoadInst>(User))
|
||||
if (LoadInst *L = dyn_cast<LoadInst>(UI))
|
||||
OrigLoad = L;
|
||||
else
|
||||
// Take any load, we will use it only to update Alias Analysis
|
||||
OrigLoad = cast<LoadInst>(User->use_back());
|
||||
OrigLoad = cast<LoadInst>(UI->user_back());
|
||||
OriginalLoads[std::make_pair(I, Indices)] = OrigLoad;
|
||||
}
|
||||
|
||||
|
@ -636,7 +631,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
|
|||
//
|
||||
SmallVector<Value*, 16> Args;
|
||||
while (!F->use_empty()) {
|
||||
CallSite CS(F->use_back());
|
||||
CallSite CS(F->user_back());
|
||||
assert(CS.getCalledFunction() == F);
|
||||
Instruction *Call = CS.getInstruction();
|
||||
const AttributeSet &CallPAL = CS.getAttributes();
|
||||
|
@ -815,9 +810,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
|
|||
|
||||
// If the alloca is used in a call, we must clear the tail flag since
|
||||
// the callee now uses an alloca from the caller.
|
||||
for (Value::use_iterator UI = TheAlloca->use_begin(),
|
||||
E = TheAlloca->use_end(); UI != E; ++UI) {
|
||||
CallInst *Call = dyn_cast<CallInst>(*UI);
|
||||
for (User *U : TheAlloca->users()) {
|
||||
CallInst *Call = dyn_cast<CallInst>(U);
|
||||
if (!Call)
|
||||
continue;
|
||||
Call->setTailCall(false);
|
||||
|
@ -836,7 +830,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
|
|||
ScalarizeTable &ArgIndices = ScalarizedElements[I];
|
||||
|
||||
while (!I->use_empty()) {
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(I->use_back())) {
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(I->user_back())) {
|
||||
assert(ArgIndices.begin()->empty() &&
|
||||
"Load element should sort to front!");
|
||||
I2->setName(I->getName()+".val");
|
||||
|
@ -846,7 +840,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
|
|||
DEBUG(dbgs() << "*** Promoted load of argument '" << I->getName()
|
||||
<< "' in function '" << F->getName() << "'\n");
|
||||
} else {
|
||||
GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->use_back());
|
||||
GetElementPtrInst *GEP = cast<GetElementPtrInst>(I->user_back());
|
||||
IndicesVector Operands;
|
||||
Operands.reserve(GEP->getNumIndices());
|
||||
for (User::op_iterator II = GEP->idx_begin(), IE = GEP->idx_end();
|
||||
|
@ -876,7 +870,7 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F,
|
|||
// All of the uses must be load instructions. Replace them all with
|
||||
// the argument specified by ArgNo.
|
||||
while (!GEP->use_empty()) {
|
||||
LoadInst *L = cast<LoadInst>(GEP->use_back());
|
||||
LoadInst *L = cast<LoadInst>(GEP->user_back());
|
||||
L->replaceAllUsesWith(TheArg);
|
||||
AA.replaceWithNewValue(L, TheArg);
|
||||
L->eraseFromParent();
|
||||
|
|
|
@ -144,7 +144,7 @@ namespace {
|
|||
|
||||
private:
|
||||
Liveness MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses);
|
||||
Liveness SurveyUse(Value::const_use_iterator U, UseVector &MaybeLiveUses,
|
||||
Liveness SurveyUse(const Use *U, UseVector &MaybeLiveUses,
|
||||
unsigned RetValNum = 0);
|
||||
Liveness SurveyUses(const Value *V, UseVector &MaybeLiveUses);
|
||||
|
||||
|
@ -260,7 +260,7 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
|
|||
// to pass in a smaller number of arguments into the new function.
|
||||
//
|
||||
std::vector<Value*> Args;
|
||||
for (Value::use_iterator I = Fn.use_begin(), E = Fn.use_end(); I != E; ) {
|
||||
for (Value::user_iterator I = Fn.user_begin(), E = Fn.user_end(); I != E; ) {
|
||||
CallSite CS(*I++);
|
||||
if (!CS)
|
||||
continue;
|
||||
|
@ -382,10 +382,9 @@ bool DAE::RemoveDeadArgumentsFromCallers(Function &Fn)
|
|||
|
||||
bool Changed = false;
|
||||
|
||||
for (Function::use_iterator I = Fn.use_begin(), E = Fn.use_end();
|
||||
I != E; ++I) {
|
||||
CallSite CS(*I);
|
||||
if (!CS || !CS.isCallee(I))
|
||||
for (Use &U : Fn.uses()) {
|
||||
CallSite CS(U.getUser());
|
||||
if (!CS || !CS.isCallee(&U))
|
||||
continue;
|
||||
|
||||
// Now go through all unused args and replace them with "undef".
|
||||
|
@ -436,9 +435,9 @@ DAE::Liveness DAE::MarkIfNotLive(RetOrArg Use, UseVector &MaybeLiveUses) {
|
|||
/// RetValNum is the return value number to use when this use is used in a
|
||||
/// return instruction. This is used in the recursion, you should always leave
|
||||
/// it at 0.
|
||||
DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
|
||||
DAE::Liveness DAE::SurveyUse(const Use *U,
|
||||
UseVector &MaybeLiveUses, unsigned RetValNum) {
|
||||
const User *V = *U;
|
||||
const User *V = U->getUser();
|
||||
if (const ReturnInst *RI = dyn_cast<ReturnInst>(V)) {
|
||||
// The value is returned from a function. It's only live when the
|
||||
// function's return value is live. We use RetValNum here, for the case
|
||||
|
@ -449,7 +448,7 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
|
|||
return MarkIfNotLive(Use, MaybeLiveUses);
|
||||
}
|
||||
if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(V)) {
|
||||
if (U.getOperandNo() != InsertValueInst::getAggregateOperandIndex()
|
||||
if (U->getOperandNo() != InsertValueInst::getAggregateOperandIndex()
|
||||
&& IV->hasIndices())
|
||||
// The use we are examining is inserted into an aggregate. Our liveness
|
||||
// depends on all uses of that aggregate, but if it is used as a return
|
||||
|
@ -460,9 +459,8 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
|
|||
// we don't change RetValNum, but do survey all our uses.
|
||||
|
||||
Liveness Result = MaybeLive;
|
||||
for (Value::const_use_iterator I = IV->use_begin(),
|
||||
E = V->use_end(); I != E; ++I) {
|
||||
Result = SurveyUse(I, MaybeLiveUses, RetValNum);
|
||||
for (const Use &UU : IV->uses()) {
|
||||
Result = SurveyUse(&UU, MaybeLiveUses, RetValNum);
|
||||
if (Result == Live)
|
||||
break;
|
||||
}
|
||||
|
@ -485,7 +483,7 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U,
|
|||
return Live;
|
||||
|
||||
assert(CS.getArgument(ArgNo)
|
||||
== CS->getOperand(U.getOperandNo())
|
||||
== CS->getOperand(U->getOperandNo())
|
||||
&& "Argument is not where we expected it");
|
||||
|
||||
// Value passed to a normal call. It's only live when the corresponding
|
||||
|
@ -508,9 +506,8 @@ DAE::Liveness DAE::SurveyUses(const Value *V, UseVector &MaybeLiveUses) {
|
|||
// Assume it's dead (which will only hold if there are no uses at all..).
|
||||
Liveness Result = MaybeLive;
|
||||
// Check each use.
|
||||
for (Value::const_use_iterator I = V->use_begin(),
|
||||
E = V->use_end(); I != E; ++I) {
|
||||
Result = SurveyUse(I, MaybeLiveUses);
|
||||
for (const Use &U : V->uses()) {
|
||||
Result = SurveyUse(&U, MaybeLiveUses);
|
||||
if (Result == Live)
|
||||
break;
|
||||
}
|
||||
|
@ -564,12 +561,11 @@ void DAE::SurveyFunction(const Function &F) {
|
|||
unsigned NumLiveRetVals = 0;
|
||||
Type *STy = dyn_cast<StructType>(F.getReturnType());
|
||||
// Loop all uses of the function.
|
||||
for (Value::const_use_iterator I = F.use_begin(), E = F.use_end();
|
||||
I != E; ++I) {
|
||||
for (const Use &U : F.uses()) {
|
||||
// If the function is PASSED IN as an argument, its address has been
|
||||
// taken.
|
||||
ImmutableCallSite CS(*I);
|
||||
if (!CS || !CS.isCallee(I)) {
|
||||
ImmutableCallSite CS(U.getUser());
|
||||
if (!CS || !CS.isCallee(&U)) {
|
||||
MarkLive(F);
|
||||
return;
|
||||
}
|
||||
|
@ -588,9 +584,8 @@ void DAE::SurveyFunction(const Function &F) {
|
|||
if (NumLiveRetVals != RetCount) {
|
||||
if (STy) {
|
||||
// Check all uses of the return value.
|
||||
for (Value::const_use_iterator I = TheCall->use_begin(),
|
||||
E = TheCall->use_end(); I != E; ++I) {
|
||||
const ExtractValueInst *Ext = dyn_cast<ExtractValueInst>(*I);
|
||||
for (const User *U : TheCall->users()) {
|
||||
const ExtractValueInst *Ext = dyn_cast<ExtractValueInst>(U);
|
||||
if (Ext && Ext->hasIndices()) {
|
||||
// This use uses a part of our return value, survey the uses of
|
||||
// that part and store the results for this index only.
|
||||
|
@ -893,7 +888,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
|
|||
//
|
||||
std::vector<Value*> Args;
|
||||
while (!F->use_empty()) {
|
||||
CallSite CS(F->use_back());
|
||||
CallSite CS(F->user_back());
|
||||
Instruction *Call = CS.getInstruction();
|
||||
|
||||
AttributesVec.clear();
|
||||
|
|
|
@ -421,14 +421,12 @@ determinePointerReadAttrs(Argument *A,
|
|||
bool IsRead = false;
|
||||
// We don't need to track IsWritten. If A is written to, return immediately.
|
||||
|
||||
for (Value::use_iterator UI = A->use_begin(), UE = A->use_end();
|
||||
UI != UE; ++UI) {
|
||||
for (Use &U : A->uses()) {
|
||||
if (Count++ >= 20)
|
||||
return Attribute::None;
|
||||
|
||||
Use *U = &UI.getUse();
|
||||
Visited.insert(U);
|
||||
Worklist.push_back(U);
|
||||
Visited.insert(&U);
|
||||
Worklist.push_back(&U);
|
||||
}
|
||||
|
||||
while (!Worklist.empty()) {
|
||||
|
@ -443,12 +441,9 @@ determinePointerReadAttrs(Argument *A,
|
|||
case Instruction::Select:
|
||||
case Instruction::AddrSpaceCast:
|
||||
// The original value is not read/written via this if the new value isn't.
|
||||
for (Instruction::use_iterator UI = I->use_begin(), UE = I->use_end();
|
||||
UI != UE; ++UI) {
|
||||
Use *U = &UI.getUse();
|
||||
if (Visited.insert(U))
|
||||
Worklist.push_back(U);
|
||||
}
|
||||
for (Use &UU : I->uses())
|
||||
if (Visited.insert(&UU))
|
||||
Worklist.push_back(&UU);
|
||||
break;
|
||||
|
||||
case Instruction::Call:
|
||||
|
|
|
@ -196,7 +196,7 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV,
|
|||
SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;
|
||||
|
||||
// Constants can't be pointers to dynamically allocated memory.
|
||||
for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
|
||||
for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
|
||||
UI != E;) {
|
||||
User *U = *UI++;
|
||||
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
|
||||
|
@ -273,7 +273,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
|
|||
// we delete a constant array, we may also be holding pointer to one of its
|
||||
// elements (or an element of one of its elements if we're dealing with an
|
||||
// array of arrays) in the worklist.
|
||||
SmallVector<WeakVH, 8> WorkList(V->use_begin(), V->use_end());
|
||||
SmallVector<WeakVH, 8> WorkList(V->user_begin(), V->user_end());
|
||||
while (!WorkList.empty()) {
|
||||
Value *UV = WorkList.pop_back_val();
|
||||
if (!UV)
|
||||
|
@ -376,9 +376,8 @@ static bool isSafeSROAElementUse(Value *V) {
|
|||
!cast<Constant>(GEPI->getOperand(1))->isNullValue())
|
||||
return false;
|
||||
|
||||
for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
|
||||
I != E; ++I)
|
||||
if (!isSafeSROAElementUse(*I))
|
||||
for (User *U : GEPI->users())
|
||||
if (!isSafeSROAElementUse(U))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
@ -444,9 +443,10 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
|
|||
}
|
||||
}
|
||||
|
||||
for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
|
||||
if (!isSafeSROAElementUse(*I))
|
||||
for (User *UU : U->users())
|
||||
if (!isSafeSROAElementUse(UU))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -454,11 +454,10 @@ static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
|
|||
/// is safe for us to perform this transformation.
|
||||
///
|
||||
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
|
||||
for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
|
||||
UI != E; ++UI) {
|
||||
if (!IsUserOfGlobalSafeForSRA(*UI, GV))
|
||||
for (User *U : GV->users())
|
||||
if (!IsUserOfGlobalSafeForSRA(U, GV))
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -551,7 +550,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
|
|||
// Loop over all of the uses of the global, replacing the constantexpr geps,
|
||||
// with smaller constantexpr geps or direct references.
|
||||
while (!GV->use_empty()) {
|
||||
User *GEP = GV->use_back();
|
||||
User *GEP = GV->user_back();
|
||||
assert(((isa<ConstantExpr>(GEP) &&
|
||||
cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
|
||||
isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");
|
||||
|
@ -612,10 +611,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
|
|||
/// phi nodes we've seen to avoid reprocessing them.
|
||||
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
|
||||
SmallPtrSet<const PHINode*, 8> &PHIs) {
|
||||
for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
|
||||
++UI) {
|
||||
const User *U = *UI;
|
||||
|
||||
for (const User *U : V->users())
|
||||
if (isa<LoadInst>(U)) {
|
||||
// Will trap.
|
||||
} else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
|
||||
|
@ -643,13 +639,13 @@ static bool AllUsesOfValueWillTrapIfNull(const Value *V,
|
|||
if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
|
||||
return false;
|
||||
} else if (isa<ICmpInst>(U) &&
|
||||
isa<ConstantPointerNull>(UI->getOperand(1))) {
|
||||
isa<ConstantPointerNull>(U->getOperand(1))) {
|
||||
// Ignore icmp X, null
|
||||
} else {
|
||||
//cerr << "NONTRAPPING USE: " << *U;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -657,10 +653,7 @@ static bool AllUsesOfValueWillTrapIfNull(const Value *V,
|
|||
/// from GV will trap if the loaded value is null. Note that this also permits
|
||||
/// comparisons of the loaded value against null, as a special case.
|
||||
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
|
||||
for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
|
||||
UI != E; ++UI) {
|
||||
const User *U = *UI;
|
||||
|
||||
for (const User *U : GV->users())
|
||||
if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
|
||||
SmallPtrSet<const PHINode*, 8> PHIs;
|
||||
if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
|
||||
|
@ -672,13 +665,12 @@ static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
|
|||
//cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
|
||||
bool Changed = false;
|
||||
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
|
||||
for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
|
||||
Instruction *I = cast<Instruction>(*UI++);
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
|
||||
LI->setOperand(0, NewV);
|
||||
|
@ -704,7 +696,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
|
|||
|
||||
if (PassedAsArg) {
|
||||
// Being passed as an argument also. Be careful to not invalidate UI!
|
||||
UI = V->use_begin();
|
||||
UI = V->user_begin();
|
||||
}
|
||||
}
|
||||
} else if (CastInst *CI = dyn_cast<CastInst>(I)) {
|
||||
|
@ -753,7 +745,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
|
|||
bool AllNonStoreUsesGone = true;
|
||||
|
||||
// Replace all uses of loads with uses of uses of the stored value.
|
||||
for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
|
||||
for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end(); GUI != E;){
|
||||
User *GlobalUser = *GUI++;
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
|
||||
Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
|
||||
|
@ -809,7 +801,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
|
|||
/// instructions that are foldable.
|
||||
static void ConstantPropUsersOf(Value *V, const DataLayout *DL,
|
||||
TargetLibraryInfo *TLI) {
|
||||
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
|
||||
for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
|
||||
if (Instruction *I = dyn_cast<Instruction>(*UI++))
|
||||
if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
|
||||
I->replaceAllUsesWith(NewC);
|
||||
|
@ -857,7 +849,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
|||
// other users to use the global as well.
|
||||
BitCastInst *TheBC = 0;
|
||||
while (!CI->use_empty()) {
|
||||
Instruction *User = cast<Instruction>(CI->use_back());
|
||||
Instruction *User = cast<Instruction>(CI->user_back());
|
||||
if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
|
||||
if (BCI->getType() == NewGV->getType()) {
|
||||
BCI->replaceAllUsesWith(NewGV);
|
||||
|
@ -888,7 +880,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
|||
|
||||
// Loop over all uses of GV, processing them in turn.
|
||||
while (!GV->use_empty()) {
|
||||
if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
|
||||
if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
|
||||
// The global is initialized when the store to it occurs.
|
||||
new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
|
||||
SI->getOrdering(), SI->getSynchScope(), SI);
|
||||
|
@ -896,15 +888,15 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
|||
continue;
|
||||
}
|
||||
|
||||
LoadInst *LI = cast<LoadInst>(GV->use_back());
|
||||
LoadInst *LI = cast<LoadInst>(GV->user_back());
|
||||
while (!LI->use_empty()) {
|
||||
Use &LoadUse = LI->use_begin().getUse();
|
||||
if (!isa<ICmpInst>(LoadUse.getUser())) {
|
||||
Use &LoadUse = *LI->use_begin();
|
||||
ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
|
||||
if (!ICI) {
|
||||
LoadUse = RepValue;
|
||||
continue;
|
||||
}
|
||||
|
||||
ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
|
||||
// Replace the cmp X, 0 with a use of the bool value.
|
||||
// Sink the load to where the compare was, if atomic rules allow us to.
|
||||
Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
|
||||
|
@ -938,7 +930,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
|||
// If the initialization boolean was used, insert it, otherwise delete it.
|
||||
if (!InitBoolUsed) {
|
||||
while (!InitBool->use_empty()) // Delete initializations
|
||||
cast<StoreInst>(InitBool->use_back())->eraseFromParent();
|
||||
cast<StoreInst>(InitBool->user_back())->eraseFromParent();
|
||||
delete InitBool;
|
||||
} else
|
||||
GV->getParent()->getGlobalList().insert(GV, InitBool);
|
||||
|
@ -964,9 +956,8 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
|||
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
|
||||
const GlobalVariable *GV,
|
||||
SmallPtrSet<const PHINode*, 8> &PHIs) {
|
||||
for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
|
||||
UI != E; ++UI) {
|
||||
const Instruction *Inst = cast<Instruction>(*UI);
|
||||
for (const User *U : V->users()) {
|
||||
const Instruction *Inst = cast<Instruction>(U);
|
||||
|
||||
if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
|
||||
continue; // Fine, ignore.
|
||||
|
@ -1013,7 +1004,7 @@ static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
|
|||
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
|
||||
GlobalVariable *GV) {
|
||||
while (!Alloc->use_empty()) {
|
||||
Instruction *U = cast<Instruction>(*Alloc->use_begin());
|
||||
Instruction *U = cast<Instruction>(*Alloc->user_begin());
|
||||
Instruction *InsertPt = U;
|
||||
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
|
||||
// If this is the store of the allocation into the global, remove it.
|
||||
|
@ -1024,7 +1015,7 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
|
|||
} else if (PHINode *PN = dyn_cast<PHINode>(U)) {
|
||||
// Insert the load in the corresponding predecessor, not right before the
|
||||
// PHI.
|
||||
InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
|
||||
InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
|
||||
} else if (isa<BitCastInst>(U)) {
|
||||
// Must be bitcast between the malloc and store to initialize the global.
|
||||
ReplaceUsesOfMallocWithGlobal(U, GV);
|
||||
|
@ -1034,7 +1025,7 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
|
|||
// If this is a "GEP bitcast" and the user is a store to the global, then
|
||||
// just process it as a bitcast.
|
||||
if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
|
||||
if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
|
||||
if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
|
||||
if (SI->getOperand(1) == GV) {
|
||||
// Must be bitcast GEP between the malloc and store to initialize
|
||||
// the global.
|
||||
|
@ -1058,19 +1049,18 @@ static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
|
|||
SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
|
||||
// We permit two users of the load: setcc comparing against the null
|
||||
// pointer, and a getelementptr of a specific form.
|
||||
for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
|
||||
++UI) {
|
||||
const Instruction *User = cast<Instruction>(*UI);
|
||||
for (const User *U : V->users()) {
|
||||
const Instruction *UI = cast<Instruction>(U);
|
||||
|
||||
// Comparison against null is ok.
|
||||
if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
|
||||
if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
|
||||
if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
|
||||
return false;
|
||||
continue;
|
||||
}
|
||||
|
||||
// getelementptr is also ok, but only a simple form.
|
||||
if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
|
||||
if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
|
||||
// Must index into the array and into the struct.
|
||||
if (GEPI->getNumOperands() < 3)
|
||||
return false;
|
||||
|
@ -1079,7 +1069,7 @@ static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
|
|||
continue;
|
||||
}
|
||||
|
||||
if (const PHINode *PN = dyn_cast<PHINode>(User)) {
|
||||
if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
|
||||
if (!LoadUsingPHIsPerLoad.insert(PN))
|
||||
// This means some phi nodes are dependent on each other.
|
||||
// Avoid infinite looping!
|
||||
|
@ -1110,9 +1100,8 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
|
|||
Instruction *StoredVal) {
|
||||
SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
|
||||
SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
|
||||
for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
|
||||
UI != E; ++UI)
|
||||
if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
|
||||
for (const User *U : GV->users())
|
||||
if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
|
||||
if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
|
||||
LoadUsingPHIsPerLoad))
|
||||
return false;
|
||||
|
@ -1251,7 +1240,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
|
|||
|
||||
// If this is the first time we've seen this PHI, recursively process all
|
||||
// users.
|
||||
for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
|
||||
for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
|
||||
Instruction *User = cast<Instruction>(*UI++);
|
||||
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
|
||||
}
|
||||
|
@ -1264,8 +1253,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
|
|||
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
|
||||
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
|
||||
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
|
||||
for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
|
||||
UI != E; ) {
|
||||
for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
|
||||
Instruction *User = cast<Instruction>(*UI++);
|
||||
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
|
||||
}
|
||||
|
@ -1396,7 +1384,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
|
|||
// Okay, the malloc site is completely handled. All of the uses of GV are now
|
||||
// loads, and all uses of those loads are simple. Rewrite them to use loads
|
||||
// of the per-field globals instead.
|
||||
for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
|
||||
for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
|
||||
Instruction *User = cast<Instruction>(*UI++);
|
||||
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
|
||||
|
@ -1619,11 +1607,9 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
|
|||
|
||||
// Walk the use list of the global seeing if all the uses are load or store.
|
||||
// If there is anything else, bail out.
|
||||
for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
|
||||
User *U = *I;
|
||||
for (User *U : GV->users())
|
||||
if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
|
||||
return false;
|
||||
}
|
||||
|
||||
DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV);
|
||||
|
||||
|
@ -1648,7 +1634,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
|
|||
IsOneZero = InitVal->isNullValue() && CI->isOne();
|
||||
|
||||
while (!GV->use_empty()) {
|
||||
Instruction *UI = cast<Instruction>(GV->use_back());
|
||||
Instruction *UI = cast<Instruction>(GV->user_back());
|
||||
if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
|
||||
// Change the store into a boolean store.
|
||||
bool StoringOther = SI->getOperand(0) == OtherVal;
|
||||
|
@ -1871,11 +1857,11 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
|
|||
/// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
|
||||
/// function, changing them to FastCC.
|
||||
static void ChangeCalleesToFastCall(Function *F) {
|
||||
for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
|
||||
if (isa<BlockAddress>(*UI))
|
||||
for (User *U : F->users()) {
|
||||
if (isa<BlockAddress>(U))
|
||||
continue;
|
||||
CallSite User(cast<Instruction>(*UI));
|
||||
User.setCallingConv(CallingConv::Fast);
|
||||
CallSite CS(cast<Instruction>(U));
|
||||
CS.setCallingConv(CallingConv::Fast);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1894,11 +1880,11 @@ static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) {
|
|||
|
||||
static void RemoveNestAttribute(Function *F) {
|
||||
F->setAttributes(StripNest(F->getContext(), F->getAttributes()));
|
||||
for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){
|
||||
if (isa<BlockAddress>(*UI))
|
||||
for (User *U : F->users()) {
|
||||
if (isa<BlockAddress>(U))
|
||||
continue;
|
||||
CallSite User(cast<Instruction>(*UI));
|
||||
User.setAttributes(StripNest(F->getContext(), User.getAttributes()));
|
||||
CallSite CS(cast<Instruction>(U));
|
||||
CS.setAttributes(StripNest(F->getContext(), CS.getAttributes()));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3140,8 +3126,8 @@ bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
|
|||
// and remove them.
|
||||
bool Changed = false;
|
||||
|
||||
for (Function::use_iterator I = CXAAtExitFn->use_begin(),
|
||||
E = CXAAtExitFn->use_end(); I != E;) {
|
||||
for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
|
||||
I != E;) {
|
||||
// We're only interested in calls. Theoretically, we could handle invoke
|
||||
// instructions as well, but neither llvm-gcc nor clang generate invokes
|
||||
// to __cxa_atexit.
|
||||
|
|
|
@ -86,18 +86,18 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
|
|||
ArgumentConstants.resize(F.arg_size());
|
||||
|
||||
unsigned NumNonconstant = 0;
|
||||
for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
|
||||
User *U = *UI;
|
||||
for (Use &U : F.uses()) {
|
||||
User *UR = U.getUser();
|
||||
// Ignore blockaddress uses.
|
||||
if (isa<BlockAddress>(U)) continue;
|
||||
if (isa<BlockAddress>(UR)) continue;
|
||||
|
||||
// Used by a non-instruction, or not the callee of a function, do not
|
||||
// transform.
|
||||
if (!isa<CallInst>(U) && !isa<InvokeInst>(U))
|
||||
if (!isa<CallInst>(UR) && !isa<InvokeInst>(UR))
|
||||
return false;
|
||||
|
||||
CallSite CS(cast<Instruction>(U));
|
||||
if (!CS.isCallee(UI))
|
||||
CallSite CS(cast<Instruction>(UR));
|
||||
if (!CS.isCallee(&U))
|
||||
return false;
|
||||
|
||||
// Check out all of the potentially constant arguments. Note that we don't
|
||||
|
@ -220,13 +220,13 @@ bool IPCP::PropagateConstantReturn(Function &F) {
|
|||
// over all users, replacing any uses of the return value with the returned
|
||||
// constant.
|
||||
bool MadeChange = false;
|
||||
for (Value::use_iterator UI = F.use_begin(), E = F.use_end(); UI != E; ++UI) {
|
||||
CallSite CS(*UI);
|
||||
for (Use &U : F.uses()) {
|
||||
CallSite CS(U.getUser());
|
||||
Instruction* Call = CS.getInstruction();
|
||||
|
||||
// Not a call instruction or a call instruction that's not calling F
|
||||
// directly?
|
||||
if (!Call || !CS.isCallee(UI))
|
||||
if (!Call || !CS.isCallee(&U))
|
||||
continue;
|
||||
|
||||
// Call result not used?
|
||||
|
@ -244,9 +244,8 @@ bool IPCP::PropagateConstantReturn(Function &F) {
|
|||
Call->replaceAllUsesWith(New);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (Value::use_iterator I = Call->use_begin(), E = Call->use_end();
|
||||
I != E;) {
|
||||
|
||||
for (auto I = Call->user_begin(), E = Call->user_end(); I != E;) {
|
||||
Instruction *Ins = cast<Instruction>(*I);
|
||||
|
||||
// Increment now, so we can remove the use
|
||||
|
|
|
@ -344,9 +344,8 @@ bool Inliner::shouldInline(CallSite CS) {
|
|||
bool callerWillBeRemoved = Caller->hasLocalLinkage();
|
||||
// This bool tracks what happens if we DO inline C into B.
|
||||
bool inliningPreventsSomeOuterInline = false;
|
||||
for (Value::use_iterator I = Caller->use_begin(), E =Caller->use_end();
|
||||
I != E; ++I) {
|
||||
CallSite CS2(*I);
|
||||
for (User *U : Caller->users()) {
|
||||
CallSite CS2(U);
|
||||
|
||||
// If this isn't a call to Caller (it could be some other sort
|
||||
// of reference) skip it. Such references will prevent the caller
|
||||
|
@ -377,7 +376,7 @@ bool Inliner::shouldInline(CallSite CS) {
|
|||
// one is set very low by getInlineCost, in anticipation that Caller will
|
||||
// be removed entirely. We did not account for this above unless there
|
||||
// is only one caller of Caller.
|
||||
if (callerWillBeRemoved && Caller->use_begin() != Caller->use_end())
|
||||
if (callerWillBeRemoved && !Caller->use_empty())
|
||||
TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;
|
||||
|
||||
if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) {
|
||||
|
|
|
@ -697,14 +697,13 @@ bool DenseMapInfo<ComparableFunction>::isEqual(const ComparableFunction &LHS,
|
|||
// Replace direct callers of Old with New.
|
||||
void MergeFunctions::replaceDirectCallers(Function *Old, Function *New) {
|
||||
Constant *BitcastNew = ConstantExpr::getBitCast(New, Old->getType());
|
||||
for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
|
||||
UI != UE;) {
|
||||
Value::use_iterator TheIter = UI;
|
||||
for (auto UI = Old->use_begin(), UE = Old->use_end(); UI != UE;) {
|
||||
Use *U = &*UI;
|
||||
++UI;
|
||||
CallSite CS(*TheIter);
|
||||
if (CS && CS.isCallee(TheIter)) {
|
||||
CallSite CS(U->getUser());
|
||||
if (CS && CS.isCallee(U)) {
|
||||
remove(CS.getInstruction()->getParent()->getParent());
|
||||
TheIter.getUse().set(BitcastNew);
|
||||
U->set(BitcastNew);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -895,17 +894,14 @@ void MergeFunctions::removeUsers(Value *V) {
|
|||
Value *V = Worklist.back();
|
||||
Worklist.pop_back();
|
||||
|
||||
for (Value::use_iterator UI = V->use_begin(), UE = V->use_end();
|
||||
UI != UE; ++UI) {
|
||||
Use &U = UI.getUse();
|
||||
if (Instruction *I = dyn_cast<Instruction>(U.getUser())) {
|
||||
for (User *U : V->users()) {
|
||||
if (Instruction *I = dyn_cast<Instruction>(U)) {
|
||||
remove(I->getParent()->getParent());
|
||||
} else if (isa<GlobalValue>(U.getUser())) {
|
||||
} else if (isa<GlobalValue>(U)) {
|
||||
// do nothing
|
||||
} else if (Constant *C = dyn_cast<Constant>(U.getUser())) {
|
||||
for (Value::use_iterator CUI = C->use_begin(), CUE = C->use_end();
|
||||
CUI != CUE; ++CUI)
|
||||
Worklist.push_back(*CUI);
|
||||
} else if (Constant *C = dyn_cast<Constant>(U)) {
|
||||
for (User *UU : C->users())
|
||||
Worklist.push_back(UU);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -128,8 +128,8 @@ Function* PartialInliner::unswitchFunction(Function* F) {
|
|||
InlineFunctionInfo IFI;
|
||||
|
||||
// Inline the top-level if test into all callers.
|
||||
std::vector<User*> Users(duplicateFunction->use_begin(),
|
||||
duplicateFunction->use_end());
|
||||
std::vector<User *> Users(duplicateFunction->user_begin(),
|
||||
duplicateFunction->user_end());
|
||||
for (std::vector<User*>::iterator UI = Users.begin(), UE = Users.end();
|
||||
UI != UE; ++UI)
|
||||
if (CallInst *CI = dyn_cast<CallInst>(*UI))
|
||||
|
@ -162,9 +162,8 @@ bool PartialInliner::runOnModule(Module& M) {
|
|||
if (currFunc->use_empty()) continue;
|
||||
|
||||
bool recursive = false;
|
||||
for (Function::use_iterator UI = currFunc->use_begin(),
|
||||
UE = currFunc->use_end(); UI != UE; ++UI)
|
||||
if (Instruction* I = dyn_cast<Instruction>(*UI))
|
||||
for (User *U : currFunc->users())
|
||||
if (Instruction* I = dyn_cast<Instruction>(U))
|
||||
if (I->getParent()->getParent() == currFunc) {
|
||||
recursive = true;
|
||||
break;
|
||||
|
|
|
@ -132,11 +132,10 @@ ModulePass *llvm::createStripDeadDebugInfoPass() {
|
|||
|
||||
/// OnlyUsedBy - Return true if V is only used by Usr.
|
||||
static bool OnlyUsedBy(Value *V, Value *Usr) {
|
||||
for(Value::use_iterator I = V->use_begin(), E = V->use_end(); I != E; ++I) {
|
||||
User *U = *I;
|
||||
for (User *U : V->users())
|
||||
if (U != Usr)
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -250,7 +249,7 @@ bool StripDebugDeclare::runOnModule(Module &M) {
|
|||
|
||||
if (Declare) {
|
||||
while (!Declare->use_empty()) {
|
||||
CallInst *CI = cast<CallInst>(Declare->use_back());
|
||||
CallInst *CI = cast<CallInst>(Declare->user_back());
|
||||
Value *Arg1 = CI->getArgOperand(0);
|
||||
Value *Arg2 = CI->getArgOperand(1);
|
||||
assert(CI->use_empty() && "llvm.dbg intrinsic should have void result");
|
||||
|
|
|
@ -788,15 +788,14 @@ static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
|
|||
// is good enough in practice and simpler than handling any number of casts.
|
||||
Value *Underlying = TrampMem->stripPointerCasts();
|
||||
if (Underlying != TrampMem &&
|
||||
(!Underlying->hasOneUse() || *Underlying->use_begin() != TrampMem))
|
||||
(!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
|
||||
return 0;
|
||||
if (!isa<AllocaInst>(Underlying))
|
||||
return 0;
|
||||
|
||||
IntrinsicInst *InitTrampoline = 0;
|
||||
for (Value::use_iterator I = TrampMem->use_begin(), E = TrampMem->use_end();
|
||||
I != E; I++) {
|
||||
IntrinsicInst *II = dyn_cast<IntrinsicInst>(*I);
|
||||
for (User *U : TrampMem->users()) {
|
||||
IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
|
||||
if (!II)
|
||||
return 0;
|
||||
if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
|
||||
|
@ -1010,9 +1009,8 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
|
|||
// the critical edge). Bail out in this case.
|
||||
if (!Caller->use_empty())
|
||||
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
|
||||
for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
|
||||
UI != E; ++UI)
|
||||
if (PHINode *PN = dyn_cast<PHINode>(*UI))
|
||||
for (User *U : II->users())
|
||||
if (PHINode *PN = dyn_cast<PHINode>(U))
|
||||
if (PN->getParent() == II->getNormalDest() ||
|
||||
PN->getParent() == II->getUnwindDest())
|
||||
return false;
|
||||
|
|
|
@ -757,7 +757,7 @@ static bool CanEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear) {
|
|||
Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
|
||||
// If this zero extend is only used by a truncate, let the truncate be
|
||||
// eliminated before we try to optimize this zext.
|
||||
if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
|
||||
if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
|
||||
return 0;
|
||||
|
||||
// If one of the common conversion will work, do it.
|
||||
|
@ -1038,7 +1038,7 @@ static bool CanEvaluateSExtd(Value *V, Type *Ty) {
|
|||
Instruction *InstCombiner::visitSExt(SExtInst &CI) {
|
||||
// If this sign extend is only used by a truncate, let the truncate be
|
||||
// eliminated before we try to optimize this sext.
|
||||
if (CI.hasOneUse() && isa<TruncInst>(CI.use_back()))
|
||||
if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
|
||||
return 0;
|
||||
|
||||
if (Instruction *I = commonCastTransforms(CI))
|
||||
|
|
|
@ -1937,16 +1937,15 @@ static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
|
|||
// and truncates that discard the high bits of the add. Verify that this is
|
||||
// the case.
|
||||
Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
|
||||
for (Value::use_iterator UI = OrigAdd->use_begin(), E = OrigAdd->use_end();
|
||||
UI != E; ++UI) {
|
||||
if (*UI == AddWithCst) continue;
|
||||
for (User *U : OrigAdd->users()) {
|
||||
if (U == AddWithCst) continue;
|
||||
|
||||
// Only accept truncates for now. We would really like a nice recursive
|
||||
// predicate like SimplifyDemandedBits, but which goes downwards the use-def
|
||||
// chain to see which bits of a value are actually demanded. If the
|
||||
// original add had another add which was then immediately truncated, we
|
||||
// could still do the transformation.
|
||||
TruncInst *TI = dyn_cast<TruncInst>(*UI);
|
||||
TruncInst *TI = dyn_cast<TruncInst>(U);
|
||||
if (TI == 0 ||
|
||||
TI->getType()->getPrimitiveSizeInBits() > NewWidth) return 0;
|
||||
}
|
||||
|
@ -2068,8 +2067,8 @@ static bool swapMayExposeCSEOpportunities(const Value * Op0,
|
|||
// At the end, if the benefit is greater than 0, Op0 should come second to
|
||||
// expose more CSE opportunities.
|
||||
int GlobalSwapBenefits = 0;
|
||||
for (Value::const_use_iterator UI = Op0->use_begin(), UIEnd = Op0->use_end(); UI != UIEnd; ++UI) {
|
||||
const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(*UI);
|
||||
for (const User *U : Op0->users()) {
|
||||
const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
|
||||
if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
|
||||
continue;
|
||||
// If Op0 is the first argument, this is not beneficial to swap the
|
||||
|
@ -2468,7 +2467,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
|
|||
// operands has at least one user besides the compare (the select),
|
||||
// which would often largely negate the benefit of folding anyway.
|
||||
if (I.hasOneUse())
|
||||
if (SelectInst *SI = dyn_cast<SelectInst>(*I.use_begin()))
|
||||
if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
|
||||
if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
|
||||
(SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
|
||||
return 0;
|
||||
|
|
|
@ -51,22 +51,22 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
|
|||
// ahead and replace the value with the global, this lets the caller quickly
|
||||
// eliminate the markers.
|
||||
|
||||
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
|
||||
User *U = cast<Instruction>(*UI);
|
||||
for (Use &U : V->uses()) {
|
||||
Instruction *I = cast<Instruction>(U.getUser());
|
||||
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
|
||||
// Ignore non-volatile loads, they are always ok.
|
||||
if (!LI->isSimple()) return false;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
|
||||
if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
|
||||
// If uses of the bitcast are ok, we are ok.
|
||||
if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, ToDelete, IsOffset))
|
||||
return false;
|
||||
continue;
|
||||
}
|
||||
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
|
||||
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
|
||||
// If the GEP has all zero indices, it doesn't offset the pointer. If it
|
||||
// doesn't, it does.
|
||||
if (!isOnlyCopiedFromConstantGlobal(
|
||||
|
@ -75,14 +75,14 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
|
|||
continue;
|
||||
}
|
||||
|
||||
if (CallSite CS = U) {
|
||||
if (CallSite CS = I) {
|
||||
// If this is the function being called then we treat it like a load and
|
||||
// ignore it.
|
||||
if (CS.isCallee(UI))
|
||||
if (CS.isCallee(&U))
|
||||
continue;
|
||||
|
||||
// Inalloca arguments are clobbered by the call.
|
||||
unsigned ArgNo = CS.getArgumentNo(UI);
|
||||
unsigned ArgNo = CS.getArgumentNo(&U);
|
||||
if (CS.isInAllocaArgument(ArgNo))
|
||||
return false;
|
||||
|
||||
|
@ -100,7 +100,7 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
|
|||
}
|
||||
|
||||
// Lifetime intrinsics can be handled by the caller.
|
||||
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
|
||||
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
|
||||
if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
|
||||
II->getIntrinsicID() == Intrinsic::lifetime_end) {
|
||||
assert(II->use_empty() && "Lifetime markers have no result to use!");
|
||||
|
@ -111,13 +111,13 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
|
|||
|
||||
// If this is isn't our memcpy/memmove, reject it as something we can't
|
||||
// handle.
|
||||
MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
|
||||
MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
|
||||
if (MI == 0)
|
||||
return false;
|
||||
|
||||
// If the transfer is using the alloca as a source of the transfer, then
|
||||
// ignore it since it is a load (unless the transfer is volatile).
|
||||
if (UI.getOperandNo() == 1) {
|
||||
if (U.getOperandNo() == 1) {
|
||||
if (MI->isVolatile()) return false;
|
||||
continue;
|
||||
}
|
||||
|
@ -130,7 +130,7 @@ isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
|
|||
if (IsOffset) return false;
|
||||
|
||||
// If the memintrinsic isn't using the alloca as the dest, reject it.
|
||||
if (UI.getOperandNo() != 0) return false;
|
||||
if (U.getOperandNo() != 0) return false;
|
||||
|
||||
// If the source of the memcpy/move is not a constant global, reject it.
|
||||
if (!pointsToConstantGlobal(MI->getSource()))
|
||||
|
|
|
@ -255,9 +255,7 @@ static bool isSafeAndProfitableToSinkLoad(LoadInst *L) {
|
|||
// profitable to do this xform.
|
||||
if (AllocaInst *AI = dyn_cast<AllocaInst>(L->getOperand(0))) {
|
||||
bool isAddressTaken = false;
|
||||
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
|
||||
UI != E; ++UI) {
|
||||
User *U = *UI;
|
||||
for (User *U : AI->users()) {
|
||||
if (isa<LoadInst>(U)) continue;
|
||||
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
|
||||
// If storing TO the alloca, then the address isn't taken.
|
||||
|
@ -518,7 +516,7 @@ static bool DeadPHICycle(PHINode *PN,
|
|||
if (PotentiallyDeadPHIs.size() == 16)
|
||||
return false;
|
||||
|
||||
if (PHINode *PU = dyn_cast<PHINode>(PN->use_back()))
|
||||
if (PHINode *PU = dyn_cast<PHINode>(PN->user_back()))
|
||||
return DeadPHICycle(PU, PotentiallyDeadPHIs);
|
||||
|
||||
return false;
|
||||
|
@ -649,32 +647,30 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
|
||||
for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
|
||||
UI != E; ++UI) {
|
||||
Instruction *User = cast<Instruction>(*UI);
|
||||
for (User *U : PN->users()) {
|
||||
Instruction *UserI = cast<Instruction>(U);
|
||||
|
||||
// If the user is a PHI, inspect its uses recursively.
|
||||
if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
|
||||
if (PHINode *UserPN = dyn_cast<PHINode>(UserI)) {
|
||||
if (PHIsInspected.insert(UserPN))
|
||||
PHIsToSlice.push_back(UserPN);
|
||||
continue;
|
||||
}
|
||||
|
||||
// Truncates are always ok.
|
||||
if (isa<TruncInst>(User)) {
|
||||
PHIUsers.push_back(PHIUsageRecord(PHIId, 0, User));
|
||||
if (isa<TruncInst>(UserI)) {
|
||||
PHIUsers.push_back(PHIUsageRecord(PHIId, 0, UserI));
|
||||
continue;
|
||||
}
|
||||
|
||||
// Otherwise it must be a lshr which can only be used by one trunc.
|
||||
if (User->getOpcode() != Instruction::LShr ||
|
||||
!User->hasOneUse() || !isa<TruncInst>(User->use_back()) ||
|
||||
!isa<ConstantInt>(User->getOperand(1)))
|
||||
if (UserI->getOpcode() != Instruction::LShr ||
|
||||
!UserI->hasOneUse() || !isa<TruncInst>(UserI->user_back()) ||
|
||||
!isa<ConstantInt>(UserI->getOperand(1)))
|
||||
return 0;
|
||||
|
||||
unsigned Shift = cast<ConstantInt>(User->getOperand(1))->getZExtValue();
|
||||
PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, User->use_back()));
|
||||
unsigned Shift = cast<ConstantInt>(UserI->getOperand(1))->getZExtValue();
|
||||
PHIUsers.push_back(PHIUsageRecord(PHIId, Shift, UserI->user_back()));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -809,7 +805,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
|
|||
// this PHI only has a single use (a PHI), and if that PHI only has one use (a
|
||||
// PHI)... break the cycle.
|
||||
if (PN.hasOneUse()) {
|
||||
Instruction *PHIUser = cast<Instruction>(PN.use_back());
|
||||
Instruction *PHIUser = cast<Instruction>(PN.user_back());
|
||||
if (PHINode *PU = dyn_cast<PHINode>(PHIUser)) {
|
||||
SmallPtrSet<PHINode*, 16> PotentiallyDeadPHIs;
|
||||
PotentiallyDeadPHIs.insert(&PN);
|
||||
|
@ -825,7 +821,7 @@ Instruction *InstCombiner::visitPHINode(PHINode &PN) {
|
|||
// late.
|
||||
if (PHIUser->hasOneUse() &&
|
||||
(isa<BinaryOperator>(PHIUser) || isa<GetElementPtrInst>(PHIUser)) &&
|
||||
PHIUser->use_back() == &PN) {
|
||||
PHIUser->user_back() == &PN) {
|
||||
return ReplaceInstUsesWith(PN, UndefValue::get(PN.getType()));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -118,7 +118,7 @@ Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
|
|||
// If so, it's known at this point that one operand is PHI and the other is
|
||||
// an extractelement node. Find the PHI user that is not the extractelement
|
||||
// node.
|
||||
Value::use_iterator iu = PN->use_begin();
|
||||
auto iu = PN->user_begin();
|
||||
Instruction *PHIUser = dyn_cast<Instruction>(*iu);
|
||||
if (PHIUser == cast<Instruction>(&EI))
|
||||
PHIUser = cast<Instruction>(*(++iu));
|
||||
|
@ -126,7 +126,7 @@ Instruction *InstCombiner::scalarizePHI(ExtractElementInst &EI, PHINode *PN) {
|
|||
// Verify that this PHI user has one use, which is the PHI itself,
|
||||
// and that it is a binary operation which is cheap to scalarize.
|
||||
// otherwise return NULL.
|
||||
if (!PHIUser->hasOneUse() || !(PHIUser->use_back() == PN) ||
|
||||
if (!PHIUser->hasOneUse() || !(PHIUser->user_back() == PN) ||
|
||||
!(isa<BinaryOperator>(PHIUser)) || !CheapToScalarize(PHIUser, true))
|
||||
return NULL;
|
||||
|
||||
|
@ -521,7 +521,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
|
|||
|
||||
// If this insertelement isn't used by some other insertelement, turn it
|
||||
// (and any insertelements it points to), into one big shuffle.
|
||||
if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.use_back())) {
|
||||
if (!IE.hasOneUse() || !isa<InsertElementInst>(IE.user_back())) {
|
||||
SmallVector<Constant*, 16> Mask;
|
||||
ShuffleOps LR = CollectShuffleElements(&IE, Mask, 0);
|
||||
|
||||
|
|
|
@ -84,9 +84,8 @@ public:
|
|||
/// now.
|
||||
///
|
||||
void AddUsersToWorkList(Instruction &I) {
|
||||
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end();
|
||||
UI != UE; ++UI)
|
||||
Add(cast<Instruction>(*UI));
|
||||
for (User *U : I.users())
|
||||
Add(cast<Instruction>(U));
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -641,10 +641,9 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
// uses into the PHI.
if (!PN->hasOneUse()) {
// Walk the use list for the instruction, comparing them to I.
for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
UI != E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
if (User != &I && !I.isIdenticalTo(User))
for (User *U : PN->users()) {
Instruction *UI = cast<Instruction>(U);
if (UI != &I && !I.isIdenticalTo(UI))
return 0;
}
// Otherwise, we can replace *all* users with the new PHI we form.

@ -759,8 +758,7 @@ Instruction *InstCombiner::FoldOpIntoPhi(Instruction &I) {
}
}
for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end();
UI != E; ) {
for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
Instruction *User = cast<Instruction>(*UI++);
if (User == &I) continue;
ReplaceInstUsesWith(*User, NewPN);
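The second hunk keeps an explicit iterator on purpose: the body rewrites users, which unlinks their uses. A sketch of the "*UI++" idiom, under the post-commit API; the helper and its semantics are illustrative, not the patch's transform:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper (not in the patch): copy-and-advance the iterator
// before the body mutates the use list, so the loop survives users being
// rewritten away mid-iteration.
static void rewriteUsersExcept(PHINode *PN, Instruction *Skip, Value *NewV) {
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *UserInst = cast<Instruction>(*UI++);
    if (UserInst == Skip)
      continue;
    UserInst->replaceUsesOfWith(PN, NewV); // drops UserInst's uses of PN
  }
}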
@ -1080,7 +1078,7 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
// Move up one level in the expression.
assert(Ancestor->hasOneUse() && "Drilled down when more than one use!");
Ancestor = Ancestor->use_back();
Ancestor = Ancestor->user_back();
} while (1);
}

@ -1425,9 +1423,8 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
do {
Instruction *PI = Worklist.pop_back_val();
for (Value::use_iterator UI = PI->use_begin(), UE = PI->use_end(); UI != UE;
++UI) {
Instruction *I = cast<Instruction>(*UI);
for (User *U : PI->users()) {
Instruction *I = cast<Instruction>(U);
switch (I->getOpcode()) {
default:
// Give up the moment we see something we can't handle.

@ -2404,12 +2401,12 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// See if we can trivially sink this instruction to a successor basic block.
if (I->hasOneUse()) {
BasicBlock *BB = I->getParent();
Instruction *UserInst = cast<Instruction>(I->use_back());
Instruction *UserInst = cast<Instruction>(*I->user_begin());
BasicBlock *UserParent;
// Get the block the use occurs in.
if (PHINode *PN = dyn_cast<PHINode>(UserInst))
UserParent = PN->getIncomingBlock(I->use_begin().getUse());
UserParent = PN->getIncomingBlock(*I->use_begin());
else
UserParent = UserInst->getParent();
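The last hunk shows the payoff of making use_iterator a true Use iterator: dereferencing it yields a Use&, which can be handed directly to APIs that take one. A short sketch of that shape; the helper name is illustrative:

#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

// Illustrative helper (not in the patch): "*I->use_begin()" now yields the
// Use itself, which PHINode::getIncomingBlock(const Use &) accepts directly.
static BasicBlock *blockOfOnlyUse(Instruction *I) {
  assert(I->hasOneUse() && "expected exactly one use");
  Use &OnlyUse = *I->use_begin();
  if (PHINode *PN = dyn_cast<PHINode>(OnlyUse.getUser()))
    return PN->getIncomingBlock(OnlyUse);
  return cast<Instruction>(OnlyUse.getUser())->getParent();
}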
@ -607,10 +607,10 @@ bool DataFlowSanitizer::runOnModule(Module &M) {
}
NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
for (Function::use_iterator ui = F.use_begin(), ue = F.use_end();
ui != ue;) {
BlockAddress *BA = dyn_cast<BlockAddress>(ui.getUse().getUser());
++ui;
for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
UI != UE;) {
BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
++UI;
if (BA) {
BA->replaceAllUsesWith(
BlockAddress::get(NewF, BA->getBasicBlock()));

@ -1105,12 +1105,11 @@ void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
bool AllLoadsStores = true;
for (Instruction::use_iterator i = I.use_begin(), e = I.use_end(); i != e;
++i) {
if (isa<LoadInst>(*i))
for (User *U : I.users()) {
if (isa<LoadInst>(U))
continue;
if (StoreInst *SI = dyn_cast<StoreInst>(*i)) {
if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
if (SI->getPointerOperand() == &I)
continue;
}

@ -440,17 +440,17 @@ bool ObjCARCContract::runOnFunction(Function &F) {
// Don't use GetObjCArg because we don't want to look through bitcasts
// and such; to do the replacement, the argument must have type i8*.
const Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
for (;;) {
// If we're compiling bugpointed code, don't get in trouble.
if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
break;
// Look through the uses of the pointer.
for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
for (Value::use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
UI != UE; ) {
Use &U = UI.getUse();
unsigned OperandNo = UI.getOperandNo();
++UI; // Increment UI now, because we may unlink its element.
// Increment UI now, because we may unlink its element.
Use &U = *UI++;
unsigned OperandNo = U.getOperandNo();
// If the call's return value dominates a use of the call's argument
// value, rewrite the use to use the return value. We check for

@ -476,8 +476,7 @@ bool ObjCARCContract::runOnFunction(Function &F) {
if (PHI->getIncomingBlock(i) == BB) {
// Keep the UI iterator valid.
if (&PHI->getOperandUse(
PHINode::getOperandNumForIncomingValue(i)) ==
&UI.getUse())
PHINode::getOperandNumForIncomingValue(i)) == &U)
++UI;
PHI->setIncomingValue(i, Replacement);
}

@ -163,12 +163,9 @@ static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
// If we found an identifiable object but it has multiple uses, but they are
// trivial uses, we can still consider this to be a single-use value.
if (IsObjCIdentifiedObject(Arg)) {
for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
UI != UE; ++UI) {
const User *U = *UI;
for (const User *U : Arg->users())
if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
return 0;
}
return Arg;
}

@ -1266,13 +1263,11 @@ ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
Users.push_back(Ptr);
do {
Ptr = Users.pop_back_val();
for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
UI != UE; ++UI) {
const User *I = *UI;
if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
for (const User *U : Ptr->users()) {
if (isa<ReturnInst>(U) || GetBasicInstructionClass(U) == IC_RetainRV)
return;
if (isa<BitCastInst>(I))
Users.push_back(I);
if (isa<BitCastInst>(U))
Users.push_back(U);
}
} while (!Users.empty());

@ -2787,9 +2782,8 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
CallInst *Call = cast<CallInst>(Inst);
Value *Arg = Call->getArgOperand(0);
if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
for (Value::use_iterator UI = Alloca->use_begin(),
UE = Alloca->use_end(); UI != UE; ++UI) {
const Instruction *UserInst = cast<Instruction>(*UI);
for (User *U : Alloca->users()) {
const Instruction *UserInst = cast<Instruction>(U);
switch (GetBasicInstructionClass(UserInst)) {
case IC_InitWeak:
case IC_StoreWeak:

@ -2800,8 +2794,7 @@ void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
}
}
Changed = true;
for (Value::use_iterator UI = Alloca->use_begin(),
UE = Alloca->use_end(); UI != UE; ) {
for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
CallInst *UserInst = cast<CallInst>(*UI++);
switch (GetBasicInstructionClass(UserInst)) {
case IC_InitWeak:

@ -79,11 +79,10 @@ static bool IsStoredObjCPointer(const Value *P) {
Visited.insert(P);
do {
P = Worklist.pop_back_val();
for (Value::const_use_iterator UI = P->use_begin(), UE = P->use_end();
UI != UE; ++UI) {
const User *Ur = *UI;
for (const Use &U : P->uses()) {
const User *Ur = U.getUser();
if (isa<StoreInst>(Ur)) {
if (UI.getOperandNo() == 0)
if (U.getOperandNo() == 0)
// The pointer is stored.
return true;
// The pointed is stored through.
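The IsStoredObjCPointer hunk is the canonical case where the commit keeps Use-level iteration rather than switching to users(): the operand position matters. A minimal sketch of that shape, assuming the const overloads this commit adds; the helper is illustrative:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper (not in the patch): iterate Uses when the operand
// position matters. Each Use knows both its User and its operand number.
static bool isEverStoredAsValue(const Value *P) {
  for (const Use &U : P->uses())
    if (isa<StoreInst>(U.getUser()) && U.getOperandNo() == 0)
      return true; // P is the stored value (operand 0), not the address
  return false;
}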
@ -266,12 +266,11 @@ static void CollectBasicBlocks(SmallPtrSet<BasicBlock *, 4> &BBs, Function &F,
BBs.insert(I->getParent());
else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U))
// Find all users of this constant expression.
for (Value::use_iterator UU = CE->use_begin(), E = CE->use_end();
UU != E; ++UU)
for (User *UU : CE->users())
// Only record users that are instructions. We don't want to go down a
// nested constant expression chain. Also check if the instruction is even
// in the current function.
if (Instruction *I = dyn_cast<Instruction>(*UU))
if (Instruction *I = dyn_cast<Instruction>(UU))
if(I->getParent()->getParent() == &F)
BBs.insert(I->getParent());
}

@ -350,12 +349,11 @@ void ConstantHoisting::EmitBaseConstants(Function &F, User *U,
ConstantExpr *CE = cast<ConstantExpr>(U);
SmallVector<std::pair<Instruction *, Instruction *>, 8> WorkList;
DEBUG(dbgs() << "Visit ConstantExpr " << *CE << '\n');
for (Value::use_iterator UU = CE->use_begin(), E = CE->use_end();
UU != E; ++UU) {
for (User *UU : CE->users()) {
DEBUG(dbgs() << "Check user "; UU->print(dbgs()); dbgs() << '\n');
// We only handel instructions here and won't walk down a ConstantExpr chain
// to replace all ConstExpr with instructions.
if (Instruction *I = dyn_cast<Instruction>(*UU)) {
if (Instruction *I = dyn_cast<Instruction>(UU)) {
// Only update constant expressions in the current function.
if (I->getParent()->getParent() != &F) {
DEBUG(dbgs() << "Not in the same function - skip.\n");

@ -423,9 +421,9 @@ bool ConstantHoisting::EmitBaseConstants(Function &F) {
// Use the same debug location as the last user of the constant.
assert(!Base->use_empty() && "The use list is empty!?");
assert(isa<Instruction>(Base->use_back()) &&
assert(isa<Instruction>(Base->user_back()) &&
"All uses should be instructions.");
Base->setDebugLoc(cast<Instruction>(Base->use_back())->getDebugLoc());
Base->setDebugLoc(cast<Instruction>(Base->user_back())->getDebugLoc());
// Correct for base constant, which we counted above too.
NumConstantsRebased--;

@ -79,9 +79,8 @@ bool ConstantPropagation::runOnFunction(Function &F) {
if (Constant *C = ConstantFoldInstruction(I, DL, TLI)) {
// Add all of the users of this instruction to the worklist, they might
// be constant propagatable now...
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI)
WorkList.insert(cast<Instruction>(*UI));
for (User *U : I->users())
WorkList.insert(cast<Instruction>(U));
// Replace all of the uses of a variable with uses of the constant.
I->replaceAllUsesWith(C);
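The ConstantPropagation hunk shows a common ordering constraint: the users must be queued before the RAUW, because replaceAllUsesWith empties the use list. A sketch of that pattern with illustrative names:

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Illustrative helper (not in the patch): collect the users *before* the
// RAUW, since replaceAllUsesWith(C) leaves I with no users to visit.
static void foldAndRequeue(Instruction *I, Value *C,
                           SmallPtrSet<Instruction *, 16> &WorkList) {
  for (User *U : I->users())
    WorkList.insert(cast<Instruction>(U)); // these may now fold as well
  I->replaceAllUsesWith(C);
}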
@ -2028,7 +2028,7 @@ unsigned GVN::replaceAllDominatedUsesWith(Value *From, Value *To,
unsigned Count = 0;
for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
UI != UE; ) {
Use &U = (UI++).getUse();
Use &U = *UI++;
if (DT->dominates(Root, U)) {
U.set(To);
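Here the awkward "(UI++).getUse()" collapses to "*UI++". A sketch of the full erase-safe rewrite loop, assuming the era's DominatorTree::dominates(const BasicBlockEdge &, const Use &) overload that the hunk itself relies on; the free-standing signature is illustrative:

#include "llvm/IR/Dominators.h"
using namespace llvm;

// Illustrative helper (not in the patch): grab the Use and advance the
// iterator before U.set() unlinks the Use from From's use list.
static unsigned rewriteDominatedUses(Value *From, Value *To,
                                     DominatorTree &DT,
                                     const BasicBlockEdge &Root) {
  unsigned Count = 0;
  for (Value::use_iterator UI = From->use_begin(), UE = From->use_end();
       UI != UE;) {
    Use &U = *UI++;
    if (DT.dominates(Root, U)) {
      U.set(To);
      ++Count;
    }
  }
  return Count;
}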
@ -269,11 +269,11 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
// Check Incr uses. One user is PN and the other user is an exit condition
// used by the conditional terminator.
Value::use_iterator IncrUse = Incr->use_begin();
Value::user_iterator IncrUse = Incr->user_begin();
Instruction *U1 = cast<Instruction>(*IncrUse++);
if (IncrUse == Incr->use_end()) return;
if (IncrUse == Incr->user_end()) return;
Instruction *U2 = cast<Instruction>(*IncrUse++);
if (IncrUse != Incr->use_end()) return;
if (IncrUse != Incr->user_end()) return;
// Find exit condition, which is an fcmp. If it doesn't exist, or if it isn't
// only used by a branch, we can't transform it.

@ -281,10 +281,10 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
if (!Compare)
Compare = dyn_cast<FCmpInst>(U2);
if (Compare == 0 || !Compare->hasOneUse() ||
!isa<BranchInst>(Compare->use_back()))
!isa<BranchInst>(Compare->user_back()))
return;
BranchInst *TheBr = cast<BranchInst>(Compare->use_back());
BranchInst *TheBr = cast<BranchInst>(Compare->user_back());
// We need to verify that the branch actually controls the iteration count
// of the loop. If not, the new IV can overflow and no one will notice.

@ -563,8 +563,8 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
unsigned NumHardInternalUses = 0;
unsigned NumSoftExternalUses = 0;
unsigned NumUses = 0;
for (Value::use_iterator IB=Inst->use_begin(), IE=Inst->use_end();
IB!=IE && NumUses<=6 ; ++IB) {
for (auto IB = Inst->user_begin(), IE = Inst->user_end();
IB != IE && NumUses <= 6; ++IB) {
Instruction *UseInstr = cast<Instruction>(*IB);
unsigned Opc = UseInstr->getOpcode();
NumUses++;

@ -576,9 +576,9 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
// Do not count the Phi as a use. LCSSA may have inserted
// plenty of trivial ones.
NumUses--;
for (Value::use_iterator PB=UseInstr->use_begin(),
PE=UseInstr->use_end();
PB!=PE && NumUses<=6 ; ++PB, ++NumUses) {
for (auto PB = UseInstr->user_begin(),
PE = UseInstr->user_end();
PB != PE && NumUses <= 6; ++PB, ++NumUses) {
unsigned PhiOpc = cast<Instruction>(*PB)->getOpcode();
if (PhiOpc != Instruction::Call && PhiOpc != Instruction::Ret)
NumSoftExternalUses++;

@ -1018,15 +1018,14 @@ Instruction *WidenIV::WidenIVUse(NarrowIVDefUse DU, SCEVExpander &Rewriter) {
/// pushNarrowIVUsers - Add eligible users of NarrowDef to NarrowIVUsers.
///
void WidenIV::pushNarrowIVUsers(Instruction *NarrowDef, Instruction *WideDef) {
for (Value::use_iterator UI = NarrowDef->use_begin(),
UE = NarrowDef->use_end(); UI != UE; ++UI) {
Instruction *NarrowUse = cast<Instruction>(*UI);
for (User *U : NarrowDef->users()) {
Instruction *NarrowUser = cast<Instruction>(U);
// Handle data flow merges and bizarre phi cycles.
if (!Widened.insert(NarrowUse))
if (!Widened.insert(NarrowUser))
continue;
NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUse, WideDef));
NarrowIVUsers.push_back(NarrowIVDefUse(NarrowDef, NarrowUser, WideDef));
}
}

@ -1417,15 +1416,11 @@ static bool AlmostDeadIV(PHINode *Phi, BasicBlock *LatchBlock, Value *Cond) {
int LatchIdx = Phi->getBasicBlockIndex(LatchBlock);
Value *IncV = Phi->getIncomingValue(LatchIdx);
for (Value::use_iterator UI = Phi->use_begin(), UE = Phi->use_end();
UI != UE; ++UI) {
if (*UI != Cond && *UI != IncV) return false;
}
for (User *U : Phi->users())
if (U != Cond && U != IncV) return false;
for (Value::use_iterator UI = IncV->use_begin(), UE = IncV->use_end();
UI != UE; ++UI) {
if (*UI != Cond && *UI != Phi) return false;
}
for (User *U : IncV->users())
if (U != Cond && U != Phi) return false;
return true;
}

@ -1755,13 +1750,12 @@ void IndVarSimplify::SinkUnusedInvariants(Loop *L) {
// Determine if there is a use in or before the loop (direct or
// otherwise).
bool UsedInLoop = false;
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI) {
User *U = *UI;
BasicBlock *UseBB = cast<Instruction>(U)->getParent();
if (PHINode *P = dyn_cast<PHINode>(U)) {
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
BasicBlock *UseBB = User->getParent();
if (PHINode *P = dyn_cast<PHINode>(User)) {
unsigned i =
PHINode::getIncomingValueNumForOperand(UI.getOperandNo());
PHINode::getIncomingValueNumForOperand(U.getOperandNo());
UseBB = P->getIncomingBlock(i);
}
if (UseBB == Preheader || L->contains(UseBB)) {
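The SinkUnusedInvariants hunk above is another place the commit deliberately chose uses() over users(): for a PHI operand, the "location" of a use is the incoming block, and that is recovered from the Use's operand number. A compact sketch; the helper is illustrative:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper (not in the patch): where does this Use occur? For a
// PHI user the answer is the incoming block, not the PHI's own block.
static BasicBlock *blockWhereUseOccurs(const Use &U) {
  Instruction *UserInst = cast<Instruction>(U.getUser());
  if (PHINode *P = dyn_cast<PHINode>(UserInst))
    return P->getIncomingBlock(
        PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
  return UserInst->getParent();
}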
@ -1435,16 +1435,15 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
// Scan all uses of this instruction to see if it is used outside of its
// block, and if so, record them in UsesToRename.
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
++UI) {
Instruction *User = cast<Instruction>(*UI);
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
if (UserPN->getIncomingBlock(UI) == BB)
if (UserPN->getIncomingBlock(U) == BB)
continue;
} else if (User->getParent() == BB)
continue;
UsesToRename.push_back(&UI.getUse());
UsesToRename.push_back(&U);
}
// If there are no uses outside the block, we're done with this instruction.

@ -1589,16 +1588,15 @@ bool JumpThreading::DuplicateCondBranchOnPHIIntoPred(BasicBlock *BB,
for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ++I) {
// Scan all uses of this instruction to see if it is used outside of its
// block, and if so, record them in UsesToRename.
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;
++UI) {
Instruction *User = cast<Instruction>(*UI);
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
if (UserPN->getIncomingBlock(UI) == BB)
if (UserPN->getIncomingBlock(U) == BB)
continue;
} else if (User->getParent() == BB)
continue;
UsesToRename.push_back(&UI.getUse());
UsesToRename.push_back(&U);
}
// If there are no uses outside the block, we're done with this instruction.
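Both JumpThreading hunks follow the same recipe, so it is worth seeing in one piece: collect the Uses (not the Users) that escape the block, so each operand slot can be renamed individually later. A sketch that mirrors the hunks line for line, wrapped in an illustrative helper:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper (not in the patch): record escaping Uses of I.
static void collectExternalUses(Instruction *I, BasicBlock *BB,
                                SmallVectorImpl<Use *> &UsesToRename) {
  for (Use &U : I->uses()) {
    Instruction *UserInst = cast<Instruction>(U.getUser());
    if (PHINode *UserPN = dyn_cast<PHINode>(UserInst)) {
      if (UserPN->getIncomingBlock(U) == BB)
        continue; // this use is effectively inside BB
    } else if (UserInst->getParent() == BB)
      continue;
    UsesToRename.push_back(&U);
  }
}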
@ -500,9 +500,9 @@ static bool isTriviallyReplacablePHI(PHINode &PN, Instruction &I) {
/// exit blocks of the loop.
///
bool LICM::isNotUsedInLoop(Instruction &I) {
for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
if (PHINode *PN = dyn_cast<PHINode>(User)) {
for (User *U : I.users()) {
Instruction *UI = cast<Instruction>(U);
if (PHINode *PN = dyn_cast<PHINode>(UI)) {
// A PHI node where all of the incoming values are this instruction are
// special -- they can just be RAUW'ed with the instruction and thus
// don't require a use in the predecessor. This is a particular important

@ -524,7 +524,7 @@ bool LICM::isNotUsedInLoop(Instruction &I) {
continue;
}
if (CurLoop->contains(User))
if (CurLoop->contains(UI))
return false;
}
return true;

@ -554,7 +554,7 @@ void LICM::sink(Instruction &I) {
// the instruction.
while (!I.use_empty()) {
// The user must be a PHI node.
PHINode *PN = cast<PHINode>(I.use_back());
PHINode *PN = cast<PHINode>(I.user_back());
BasicBlock *ExitBlock = PN->getParent();
assert(ExitBlockSet.count(ExitBlock) &&
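The sink() hunk uses the other recurring shape in this commit: a consuming loop that repeatedly takes user_back() until the use list is empty, relying on each rewrite or erase to shrink the list. A sketch of that idiom, assuming every remaining user is a load; the helper is illustrative:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper (not in the patch): user_back() keeps yielding a fresh
// user because each erase removes that user's uses from V's list.
static void rewireAllLoadsOf(Value &V, Value *NewV) {
  while (!V.use_empty()) {
    LoadInst *LI = cast<LoadInst>(V.user_back());
    LI->replaceAllUsesWith(NewV);
    LI->eraseFromParent();
  }
}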
@ -789,23 +789,22 @@ void LICM::PromoteAliasSet(AliasSet &AS,
if (SomePtr->getType() != ASIV->getType())
return;
for (Value::use_iterator UI = ASIV->use_begin(), UE = ASIV->use_end();
UI != UE; ++UI) {
for (User *U : ASIV->users()) {
// Ignore instructions that are outside the loop.
Instruction *Use = dyn_cast<Instruction>(*UI);
if (!Use || !CurLoop->contains(Use))
Instruction *UI = dyn_cast<Instruction>(U);
if (!UI || !CurLoop->contains(UI))
continue;
// If there is an non-load/store instruction in the loop, we can't promote
// it.
if (LoadInst *load = dyn_cast<LoadInst>(Use)) {
if (LoadInst *load = dyn_cast<LoadInst>(UI)) {
assert(!load->isVolatile() && "AST broken");
if (!load->isSimple())
return;
} else if (StoreInst *store = dyn_cast<StoreInst>(Use)) {
} else if (StoreInst *store = dyn_cast<StoreInst>(UI)) {
// Stores *of* the pointer are not interesting, only stores *to* the
// pointer.
if (Use->getOperand(1) != ASIV)
if (UI->getOperand(1) != ASIV)
continue;
assert(!store->isVolatile() && "AST broken");
if (!store->isSimple())

@ -821,13 +820,13 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// Larger is better, with the exception of 0 being the best alignment.
unsigned InstAlignment = store->getAlignment();
if ((InstAlignment > Alignment || InstAlignment == 0) && Alignment != 0)
if (isGuaranteedToExecute(*Use)) {
if (isGuaranteedToExecute(*UI)) {
GuaranteedToExecute = true;
Alignment = InstAlignment;
}
if (!GuaranteedToExecute)
GuaranteedToExecute = isGuaranteedToExecute(*Use);
GuaranteedToExecute = isGuaranteedToExecute(*UI);
} else
return; // Not a load or store.

@ -835,13 +834,13 @@ void LICM::PromoteAliasSet(AliasSet &AS,
// Merge the TBAA tags.
if (LoopUses.empty()) {
// On the first load/store, just take its TBAA tag.
TBAATag = Use->getMetadata(LLVMContext::MD_tbaa);
TBAATag = UI->getMetadata(LLVMContext::MD_tbaa);
} else if (TBAATag) {
TBAATag = MDNode::getMostGenericTBAA(TBAATag,
Use->getMetadata(LLVMContext::MD_tbaa));
UI->getMetadata(LLVMContext::MD_tbaa));
}
LoopUses.push_back(Use);
LoopUses.push_back(UI);
}
}

@ -464,17 +464,17 @@ bool NclPopcountRecognize::detectIdiom(Instruction *&CntInst,
// Check if the result of the instruction is live of the loop.
bool LiveOutLoop = false;
for (Value::use_iterator I = Inst->use_begin(), E = Inst->use_end();
I != E; I++) {
if ((cast<Instruction>(*I))->getParent() != LoopEntry) {
for (User *U : Inst->users()) {
if ((cast<Instruction>(U))->getParent() != LoopEntry) {
LiveOutLoop = true; break;
}
}

@ -602,11 +601,9 @@ void NclPopcountRecognize::transform(Instruction *CntInst,
// __builtin_ctpop().
{
SmallVector<Value *, 4> CntUses;
for (Value::use_iterator I = CntInst->use_begin(), E = CntInst->use_end();
I != E; I++) {
if (cast<Instruction>(*I)->getParent() != Body)
CntUses.push_back(*I);
}
for (User *U : CntInst->users())
if (cast<Instruction>(U)->getParent() != Body)
CntUses.push_back(U);
for (unsigned Idx = 0; Idx < CntUses.size(); Idx++) {
(cast<Instruction>(CntUses[Idx]))->replaceUsesOfWith(CntInst, NewCount);
}

@ -118,9 +118,8 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
Value *V = SimplifyInstruction(I, DL, TLI, DT);
if (V && LI->replacementPreservesLCSSAForm(I, V)) {
// Mark all uses for resimplification next time round the loop.
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
UI != UE; ++UI)
Next->insert(cast<Instruction>(*UI));
for (User *U : I->users())
Next->insert(cast<Instruction>(U));
I->replaceAllUsesWith(V);
LocalChanged = true;

@ -354,12 +354,9 @@ Pass *llvm::createLoopRerollPass() {
// This operates like Instruction::isUsedOutsideOfBlock, but considers PHIs in
// non-loop blocks to be outside the loop.
static bool hasUsesOutsideLoop(Instruction *I, Loop *L) {
for (Value::use_iterator UI = I->use_begin(),
UIE = I->use_end(); UI != UIE; ++UI) {
Instruction *User = cast<Instruction>(*UI);
if (!L->contains(User))
for (User *U : I->users())
if (!L->contains(cast<Instruction>(U)))
return true;
}
return false;
}

@ -409,7 +406,7 @@ void LoopReroll::SimpleLoopReduction::add(Loop *L) {
Instruction *C = Instructions.front();
do {
C = cast<Instruction>(*C->use_begin());
C = cast<Instruction>(*C->user_begin());
if (C->hasOneUse()) {
if (!C->isBinaryOp())
return;

@ -424,17 +421,15 @@ void LoopReroll::SimpleLoopReduction::add(Loop *L) {
if (Instructions.size() < 2 ||
!C->isSameOperationAs(Instructions.back()) ||
C->use_begin() == C->use_end())
C->use_empty())
return;
// C is now the (potential) last instruction in the reduction chain.
for (Value::use_iterator UI = C->use_begin(), UIE = C->use_end();
UI != UIE; ++UI) {
for (User *U : C->users())
// The only in-loop user can be the initial PHI.
if (L->contains(cast<Instruction>(*UI)))
if (cast<Instruction>(*UI) != Instructions.front())
if (L->contains(cast<Instruction>(U)))
if (cast<Instruction>(U) != Instructions.front())
return;
}
Instructions.push_back(C);
Valid = true;

@ -484,12 +479,11 @@ void LoopReroll::collectInLoopUserSet(Loop *L,
continue;
if (!Final.count(I))
for (Value::use_iterator UI = I->use_begin(),
UIE = I->use_end(); UI != UIE; ++UI) {
Instruction *User = cast<Instruction>(*UI);
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
if (PHINode *PN = dyn_cast<PHINode>(User)) {
// Ignore "wrap-around" uses to PHIs of this loop's header.
if (PN->getIncomingBlock(UI) == L->getHeader())
if (PN->getIncomingBlock(U) == L->getHeader())
continue;
}

@ -560,8 +554,8 @@ bool LoopReroll::findScaleFromMul(Instruction *RealIV, uint64_t &Scale,
if (RealIV->getNumUses() != 2)
return false;
const SCEVAddRecExpr *RealIVSCEV = cast<SCEVAddRecExpr>(SE->getSCEV(RealIV));
Instruction *User1 = cast<Instruction>(*RealIV->use_begin()),
*User2 = cast<Instruction>(*std::next(RealIV->use_begin()));
Instruction *User1 = cast<Instruction>(*RealIV->user_begin()),
*User2 = cast<Instruction>(*std::next(RealIV->user_begin()));
if (!SE->isSCEVable(User1->getType()) || !SE->isSCEVable(User2->getType()))
return false;
const SCEVAddRecExpr *User1SCEV =

@ -617,26 +611,25 @@ bool LoopReroll::collectAllRoots(Loop *L, uint64_t Inc, uint64_t Scale,
SmallVector<SmallInstructionVector, 32> &Roots,
SmallInstructionSet &AllRoots,
SmallInstructionVector &LoopIncs) {
for (Value::use_iterator UI = IV->use_begin(),
UIE = IV->use_end(); UI != UIE; ++UI) {
Instruction *User = cast<Instruction>(*UI);
if (!SE->isSCEVable(User->getType()))
for (User *U : IV->users()) {
Instruction *UI = cast<Instruction>(U);
if (!SE->isSCEVable(UI->getType()))
continue;
if (User->getType() != IV->getType())
if (UI->getType() != IV->getType())
continue;
if (!L->contains(User))
if (!L->contains(UI))
continue;
if (hasUsesOutsideLoop(User, L))
if (hasUsesOutsideLoop(UI, L))
continue;
if (const SCEVConstant *Diff = dyn_cast<SCEVConstant>(SE->getMinusSCEV(
SE->getSCEV(User), SE->getSCEV(IV)))) {
SE->getSCEV(UI), SE->getSCEV(IV)))) {
uint64_t Idx = Diff->getValue()->getValue().getZExtValue();
if (Idx > 0 && Idx < Scale) {
Roots[Idx-1].push_back(User);
AllRoots.insert(User);
Roots[Idx-1].push_back(UI);
AllRoots.insert(UI);
} else if (Idx == Scale && Inc > 1) {
LoopIncs.push_back(User);
LoopIncs.push_back(UI);
}
}
}

@ -720,10 +713,8 @@ void LoopReroll::ReductionTracker::replaceSelected() {
// Replace users with the new end-of-chain value.
SmallInstructionVector Users;
for (Value::use_iterator UI =
PossibleReds[i].getReducedValue()->use_begin(),
UIE = PossibleReds[i].getReducedValue()->use_end(); UI != UIE; ++UI)
Users.push_back(cast<Instruction>(*UI));
for (User *U : PossibleReds[i].getReducedValue()->users())
Users.push_back(cast<Instruction>(U));
for (SmallInstructionVector::iterator J = Users.begin(),
JE = Users.end(); J != JE; ++J)

@ -134,7 +134,7 @@ static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
UE = OrigHeaderVal->use_end(); UI != UE; ) {
// Grab the use before incrementing the iterator.
Use &U = UI.getUse();
Use &U = *UI;
// Increment the iterator before removing the use from the list.
++UI;

@ -722,13 +722,12 @@ static bool isHighCostExpansion(const SCEV *S,
// multiplication already generates this expression.
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
Value *UVal = U->getValue();
for (Value::use_iterator UI = UVal->use_begin(), UE = UVal->use_end();
UI != UE; ++UI) {
for (User *UR : UVal->users()) {
// If U is a constant, it may be used by a ConstantExpr.
Instruction *User = dyn_cast<Instruction>(*UI);
if (User && User->getOpcode() == Instruction::Mul
&& SE.isSCEVable(User->getType())) {
return SE.getSCEV(User) == Mul;
Instruction *UI = dyn_cast<Instruction>(UR);
if (UI && UI->getOpcode() == Instruction::Mul &&
SE.isSCEVable(UI->getType())) {
return SE.getSCEV(UI) == Mul;
}
}
}

@ -2635,9 +2634,8 @@ void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
// they will eventually be used be the current chain, or can be computed
// from one of the chain increments. To be more precise we could
// transitively follow its user and only add leaf IV users to the set.
for (Value::use_iterator UseIter = IVOper->use_begin(),
UseEnd = IVOper->use_end(); UseIter != UseEnd; ++UseIter) {
Instruction *OtherUse = dyn_cast<Instruction>(*UseIter);
for (User *U : IVOper->users()) {
Instruction *OtherUse = dyn_cast<Instruction>(U);
if (!OtherUse)
continue;
// Uses in the chain will no longer be uses if the chain is formed.

@ -3048,18 +3046,17 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
Worklist.push_back(D->getLHS());
Worklist.push_back(D->getRHS());
} else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
if (!Inserted.insert(U)) continue;
const Value *V = U->getValue();
} else if (const SCEVUnknown *US = dyn_cast<SCEVUnknown>(S)) {
if (!Inserted.insert(US)) continue;
const Value *V = US->getValue();
if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
// Look for instructions defined outside the loop.
if (L->contains(Inst)) continue;
} else if (isa<UndefValue>(V))
// Undef doesn't have a live range, so it doesn't matter.
continue;
for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
UI != UE; ++UI) {
const Instruction *UserInst = dyn_cast<Instruction>(*UI);
for (const Use &U : V->uses()) {
const Instruction *UserInst = dyn_cast<Instruction>(U.getUser());
// Ignore non-instructions.
if (!UserInst)
continue;

@ -3071,7 +3068,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
UserInst->getParent() :
cast<PHINode>(UserInst)->getIncomingBlock(
PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
PHINode::getIncomingValueNumForOperand(U.getOperandNo()));
if (!DT.dominates(L->getHeader(), UseBB))
continue;
// Ignore uses which are part of other SCEV expressions, to avoid

@ -3081,7 +3078,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
// If the user is a no-op, look through to its uses.
if (!isa<SCEVUnknown>(UserS))
continue;
if (UserS == U) {
if (UserS == US) {
Worklist.push_back(
SE.getUnknown(const_cast<Instruction *>(UserInst)));
continue;

@ -3089,7 +3086,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
}
// Ignore icmp instructions which are already being analyzed.
if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
unsigned OtherIdx = !UI.getOperandNo();
unsigned OtherIdx = !U.getOperandNo();
Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
continue;

@ -3097,7 +3094,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
LSRFixup &LF = getNewFixup();
LF.UserInst = const_cast<Instruction *>(UserInst);
LF.OperandValToReplace = UI.getUse();
LF.OperandValToReplace = U;
std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
LF.LUIdx = P.first;
LF.Offset = P.second;

@ -3107,7 +3104,7 @@ LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
SE.getTypeSizeInBits(LU.WidestFixupType) <
SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
LU.WidestFixupType = LF.OperandValToReplace->getType();
InsertSupplementalFormula(U, LU, LF.LUIdx);
InsertSupplementalFormula(US, LU, LF.LUIdx);
CountRegisters(LU.Formulae.back(), Uses.size() - 1);
break;
}

@ -939,9 +939,8 @@ static void ReplaceUsesOfWith(Instruction *I, Value *V,
Worklist.push_back(Use);
// Add users to the worklist which may be simplified now.
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
UI != E; ++UI)
Worklist.push_back(cast<Instruction>(*UI));
for (User *U : I->users())
Worklist.push_back(cast<Instruction>(U));
LPM->deleteSimpleAnalysisValue(I, L);
RemoveFromWorklist(I, Worklist);
I->replaceAllUsesWith(V);

@ -991,12 +990,11 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
Replacement = ConstantInt::get(Type::getInt1Ty(Val->getContext()),
!cast<ConstantInt>(Val)->getZExtValue());
for (Value::use_iterator UI = LIC->use_begin(), E = LIC->use_end();
UI != E; ++UI) {
Instruction *U = dyn_cast<Instruction>(*UI);
if (!U || !L->contains(U))
for (User *U : LIC->users()) {
Instruction *UI = dyn_cast<Instruction>(U);
if (!UI || !L->contains(UI))
continue;
Worklist.push_back(U);
Worklist.push_back(UI);
}
for (std::vector<Instruction*>::iterator UI = Worklist.begin(),

@ -1010,19 +1008,18 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// Otherwise, we don't know the precise value of LIC, but we do know that it
// is certainly NOT "Val". As such, simplify any uses in the loop that we
// can. This case occurs when we unswitch switch statements.
for (Value::use_iterator UI = LIC->use_begin(), E = LIC->use_end();
UI != E; ++UI) {
Instruction *U = dyn_cast<Instruction>(*UI);
if (!U || !L->contains(U))
for (User *U : LIC->users()) {
Instruction *UI = dyn_cast<Instruction>(U);
if (!UI || !L->contains(UI))
continue;
Worklist.push_back(U);
Worklist.push_back(UI);
// TODO: We could do other simplifications, for example, turning
// 'icmp eq LIC, Val' -> false.
// If we know that LIC is not Val, use this info to simplify code.
SwitchInst *SI = dyn_cast<SwitchInst>(U);
SwitchInst *SI = dyn_cast<SwitchInst>(UI);
if (SI == 0 || !isa<ConstantInt>(Val)) continue;
SwitchInst::CaseIt DeadCase = SI->findCaseValue(cast<ConstantInt>(Val));

@ -657,23 +657,21 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
// guarantees that it holds only undefined values when passed in (so the final
// memcpy can be dropped), that it is not read or written between the call and
// the memcpy, and that writing beyond the end of it is undefined.
SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
srcAlloca->use_end());
SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
srcAlloca->user_end());
while (!srcUseList.empty()) {
User *UI = srcUseList.pop_back_val();
User *U = srcUseList.pop_back_val();
if (isa<BitCastInst>(UI) || isa<AddrSpaceCastInst>(UI)) {
for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
I != E; ++I)
srcUseList.push_back(*I);
} else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
for (User *UU : U->users())
srcUseList.push_back(UU);
} else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
if (G->hasAllZeroIndices())
for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
I != E; ++I)
srcUseList.push_back(*I);
for (User *UU : U->users())
srcUseList.push_back(UU);
else
return false;
} else if (UI != C && UI != cpy) {
} else if (U != C && U != cpy) {
return false;
}
}
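The MemCpyOpt hunk seeds a worklist from the new user_begin()/user_end() pair and then pushes the users of pointer-preserving casts, so the whole use tree below the alloca is inspected. A condensed sketch of that traversal, with an illustrative helper and predicate:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper (not in the patch): is AI used only by Allowed,
// looking through bitcasts?
static bool onlyUsedBy(AllocaInst *AI, const User *Allowed) {
  SmallVector<User *, 8> Work(AI->user_begin(), AI->user_end());
  while (!Work.empty()) {
    User *U = Work.pop_back_val();
    if (isa<BitCastInst>(U)) {
      for (User *UU : U->users())
        Work.push_back(UU); // descend through the cast
    } else if (U != Allowed) {
      return false;
    }
  }
  return true;
}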
@ -820,7 +820,7 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
if (ExpressionChanged == I)
break;
ExpressionChanged->moveBefore(I);
ExpressionChanged = cast<BinaryOperator>(*ExpressionChanged->use_begin());
ExpressionChanged = cast<BinaryOperator>(*ExpressionChanged->user_begin());
} while (1);
// Throw away any left over nodes from the original expression.

@ -862,8 +862,7 @@ static Value *NegateValue(Value *V, Instruction *BI) {
// Okay, we need to materialize a negated version of V with an instruction.
// Scan the use lists of V to see if we have one already.
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
User *U = *UI;
for (User *U : V->users()) {
if (!BinaryOperator::isNeg(U)) continue;
// We found one! Now we have to make sure that the definition dominates

@ -913,8 +912,8 @@ static bool ShouldBreakUpSubtract(Instruction *Sub) {
isReassociableOp(Sub->getOperand(1), Instruction::Sub))
return true;
if (Sub->hasOneUse() &&
(isReassociableOp(Sub->use_back(), Instruction::Add) ||
isReassociableOp(Sub->use_back(), Instruction::Sub)))
(isReassociableOp(Sub->user_back(), Instruction::Add) ||
isReassociableOp(Sub->user_back(), Instruction::Sub)))
return true;
return false;

@ -1781,9 +1780,9 @@ void Reassociate::EraseInst(Instruction *I) {
// If this is a node in an expression tree, climb to the expression root
// and add that since that's where optimization actually happens.
unsigned Opcode = Op->getOpcode();
while (Op->hasOneUse() && Op->use_back()->getOpcode() == Opcode &&
while (Op->hasOneUse() && Op->user_back()->getOpcode() == Opcode &&
Visited.insert(Op))
Op = Op->use_back();
Op = Op->user_back();
RedoInsts.insert(Op);
}
}

@ -1801,8 +1800,8 @@ void Reassociate::OptimizeInst(Instruction *I) {
// is used by a reassociable multiply or add, turn into a multiply.
if (isReassociableOp(I->getOperand(0), Instruction::Mul) ||
(I->hasOneUse() &&
(isReassociableOp(I->use_back(), Instruction::Mul) ||
isReassociableOp(I->use_back(), Instruction::Add)))) {
(isReassociableOp(I->user_back(), Instruction::Mul) ||
isReassociableOp(I->user_back(), Instruction::Add)))) {
Instruction *NI = ConvertShiftToMul(I);
RedoInsts.insert(I);
MadeChange = true;

@ -1855,7 +1854,7 @@ void Reassociate::OptimizeInst(Instruction *I) {
// and if this is not an inner node of a multiply tree.
if (isReassociableOp(I->getOperand(1), Instruction::Mul) &&
(!I->hasOneUse() ||
!isReassociableOp(I->use_back(), Instruction::Mul))) {
!isReassociableOp(I->user_back(), Instruction::Mul))) {
Instruction *NI = LowerNegateToMultiply(I);
RedoInsts.insert(I);
MadeChange = true;

@ -1871,13 +1870,13 @@ void Reassociate::OptimizeInst(Instruction *I) {
// If this is an interior node of a reassociable tree, ignore it until we
// get to the root of the tree, to avoid N^2 analysis.
unsigned Opcode = BO->getOpcode();
if (BO->hasOneUse() && BO->use_back()->getOpcode() == Opcode)
if (BO->hasOneUse() && BO->user_back()->getOpcode() == Opcode)
return;
// If this is an add tree that is used by a sub instruction, ignore it
// until we process the subtract.
if (BO->hasOneUse() && BO->getOpcode() == Instruction::Add &&
cast<Instruction>(BO->use_back())->getOpcode() == Instruction::Sub)
cast<Instruction>(BO->user_back())->getOpcode() == Instruction::Sub)
return;
ReassociateExpression(BO);

@ -1929,7 +1928,7 @@ void Reassociate::ReassociateExpression(BinaryOperator *I) {
// In this case we reassociate to put the negation on the outside so that we
// can fold the negation into the add: (-X)*Y + Z -> Z-X*Y
if (I->getOpcode() == Instruction::Mul && I->hasOneUse() &&
cast<Instruction>(I->use_back())->getOpcode() == Instruction::Add &&
cast<Instruction>(I->user_back())->getOpcode() == Instruction::Add &&
isa<ConstantInt>(Ops.back().Op) &&
cast<ConstantInt>(Ops.back().Op)->isAllOnesValue()) {
ValueEntry Tmp = Ops.pop_back_val();

@ -45,12 +45,11 @@ namespace {
AU.addPreservedID(BreakCriticalEdgesID);
}
bool valueEscapes(const Instruction *Inst) const {
const BasicBlock *BB = Inst->getParent();
for (Value::const_use_iterator UI = Inst->use_begin(),E = Inst->use_end();
UI != E; ++UI) {
const Instruction *I = cast<Instruction>(*UI);
if (I->getParent() != BB || isa<PHINode>(I))
bool valueEscapes(const Instruction *Inst) const {
const BasicBlock *BB = Inst->getParent();
for (const User *U : Inst->users()) {
const Instruction *UI = cast<Instruction>(U);
if (UI->getParent() != BB || isa<PHINode>(UI))
return true;
}
return false;

@ -1181,10 +1181,9 @@ void SCCPSolver::Solve() {
// since all of its users will have already been marked as overdefined
// Update all of the users of this instruction's value.
//
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
UI != E; ++UI)
if (Instruction *I = dyn_cast<Instruction>(*UI))
OperandChangedState(I);
for (User *U : I->users())
if (Instruction *UI = dyn_cast<Instruction>(U))
OperandChangedState(UI);
}
// Process the instruction work list.

@ -1201,10 +1200,9 @@ void SCCPSolver::Solve() {
// Update all of the users of this instruction's value.
//
if (I->getType()->isStructTy() || !getValueState(I).isOverdefined())
for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
UI != E; ++UI)
if (Instruction *I = dyn_cast<Instruction>(*UI))
OperandChangedState(I);
for (User *U : I->users())
if (Instruction *UI = dyn_cast<Instruction>(U))
OperandChangedState(UI);
}
// Process the basic block work list.

@ -1662,21 +1660,20 @@ static bool AddressIsTaken(const GlobalValue *GV) {
// Delete any dead constantexpr klingons.
GV->removeDeadConstantUsers();
for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
UI != E; ++UI) {
const User *U = *UI;
if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
for (const Use &U : GV->uses()) {
const User *UR = U.getUser();
if (const StoreInst *SI = dyn_cast<StoreInst>(UR)) {
if (SI->getOperand(0) == GV || SI->isVolatile())
return true; // Storing addr of GV.
} else if (isa<InvokeInst>(U) || isa<CallInst>(U)) {
} else if (isa<InvokeInst>(UR) || isa<CallInst>(UR)) {
// Make sure we are calling the function, not passing the address.
ImmutableCallSite CS(cast<Instruction>(U));
if (!CS.isCallee(UI))
ImmutableCallSite CS(cast<Instruction>(UR));
if (!CS.isCallee(&U))
return true;
} else if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
} else if (const LoadInst *LI = dyn_cast<LoadInst>(UR)) {
if (LI->isVolatile())
return true;
} else if (isa<BlockAddress>(U)) {
} else if (isa<BlockAddress>(UR)) {
// blockaddress doesn't take the address of the function, it takes addr
// of label.
} else {
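The AddressIsTaken hunk shows the new CallSite interplay: with Use-based iteration, "is this use the callee slot?" becomes a plain pointer comparison through isCallee(const Use *). A sketch of that check in isolation; the helper is illustrative, and the header path shifted around this release (older trees have it under llvm/Support/CallSite.h):

#include "llvm/IR/CallSite.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative helper (not in the patch): true iff U is the callee operand
// of a direct call or invoke.
static bool isDirectCallUse(const Use &U) {
  const User *UR = U.getUser();
  if (!isa<CallInst>(UR) && !isa<InvokeInst>(UR))
    return false;
  ImmutableCallSite CS(cast<Instruction>(UR));
  return CS.isCallee(&U);
}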
@ -1839,8 +1836,9 @@ bool IPSCCP::runOnModule(Module &M) {
for (unsigned i = 0, e = BlocksToErase.size(); i != e; ++i) {
// If there are any PHI nodes in this successor, drop entries for BB now.
BasicBlock *DeadBB = BlocksToErase[i];
for (Value::use_iterator UI = DeadBB->use_begin(), UE = DeadBB->use_end();
UI != UE; ) {
for (Value::user_iterator UI = DeadBB->user_begin(),
UE = DeadBB->user_end();
UI != UE;) {
// Grab the user and then increment the iterator early, as the user
// will be deleted. Step past all adjacent uses from the same user.
Instruction *I = dyn_cast<Instruction>(*UI);

@ -1930,7 +1928,7 @@ bool IPSCCP::runOnModule(Module &M) {
"Overdefined values should have been taken out of the map!");
DEBUG(dbgs() << "Found that GV '" << GV->getName() << "' is constant!\n");
while (!GV->use_empty()) {
StoreInst *SI = cast<StoreInst>(GV->use_back());
StoreInst *SI = cast<StoreInst>(GV->user_back());
SI->eraseFromParent();
}
M.getGlobalList().erase(GV);

@ -637,10 +637,9 @@ private:
return I;
}
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end(); UI != UE;
++UI)
if (Visited.insert(cast<Instruction>(*UI)))
Uses.push_back(std::make_pair(I, cast<Instruction>(*UI)));
for (User *U : I->users())
if (Visited.insert(cast<Instruction>(U)))
Uses.push_back(std::make_pair(I, cast<Instruction>(U)));
} while (!Uses.empty());
return 0;

@ -817,12 +816,10 @@ public:
// Retain the debug information attached to the alloca for use when
// rewriting loads and stores.
if (MDNode *DebugNode = MDNode::getIfExists(AI.getContext(), &AI)) {
for (Value::use_iterator UI = DebugNode->use_begin(),
UE = DebugNode->use_end();
UI != UE; ++UI)
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
for (User *U : DebugNode->users())
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
DDIs.push_back(DDI);
else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
DVIs.push_back(DVI);
}

@ -1085,9 +1082,8 @@ static bool isSafePHIToSpeculate(PHINode &PN,
BasicBlock *BB = PN.getParent();
unsigned MaxAlign = 0;
bool HaveLoad = false;
for (Value::use_iterator UI = PN.use_begin(), UE = PN.use_end(); UI != UE;
++UI) {
LoadInst *LI = dyn_cast<LoadInst>(*UI);
for (User *U : PN.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
if (LI == 0 || !LI->isSimple())
return false;

@ -1151,13 +1147,13 @@ static void speculatePHINodeLoads(PHINode &PN) {
// Get the TBAA tag and alignment to use from one of the loads. It doesn't
// matter which one we get and if any differ.
LoadInst *SomeLoad = cast<LoadInst>(*PN.use_begin());
LoadInst *SomeLoad = cast<LoadInst>(PN.user_back());
MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
unsigned Align = SomeLoad->getAlignment();
// Rewrite all loads of the PN to use the new PHI.
while (!PN.use_empty()) {
LoadInst *LI = cast<LoadInst>(*PN.use_begin());
LoadInst *LI = cast<LoadInst>(PN.user_back());
LI->replaceAllUsesWith(NewPN);
LI->eraseFromParent();
}

@ -1201,9 +1197,8 @@ static bool isSafeSelectToSpeculate(SelectInst &SI, const DataLayout *DL = 0) {
bool TDerefable = TValue->isDereferenceablePointer();
bool FDerefable = FValue->isDereferenceablePointer();
for (Value::use_iterator UI = SI.use_begin(), UE = SI.use_end(); UI != UE;
++UI) {
LoadInst *LI = dyn_cast<LoadInst>(*UI);
for (User *U : SI.users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
if (LI == 0 || !LI->isSimple())
return false;

@ -1229,7 +1224,7 @@ static void speculateSelectInstLoads(SelectInst &SI) {
Value *FV = SI.getFalseValue();
// Replace the loads of the select with a select of two loads.
while (!SI.use_empty()) {
LoadInst *LI = cast<LoadInst>(*SI.use_begin());
LoadInst *LI = cast<LoadInst>(SI.user_back());
assert(LI->isSimple() && "We only speculate simple loads");
IRB.SetInsertPoint(LI);

@ -2782,10 +2777,9 @@ private:
/// Enqueue all the users of the given instruction for further processing.
/// This uses a set to de-duplicate users.
void enqueueUsers(Instruction &I) {
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
++UI)
if (Visited.insert(*UI))
Queue.push_back(&UI.getUse());
for (Use &U : I.uses())
if (Visited.insert(U.getUser()))
Queue.push_back(&U);
}
// Conservative default is to not rewrite anything.
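SROA's enqueueUsers is a neat hybrid: de-duplicate on the User, but queue the Use, so the rewriter still knows which operand to visit. A sketch that mirrors the hunk, as a free function with illustrative parameters (SmallPtrSet::insert returned bool in this era):

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Illustrative helper (not in the patch): enqueue each user once, keyed on
// the User, while remembering the exact Use to rewrite later.
static void enqueueUses(Instruction &I, SmallPtrSet<User *, 8> &Visited,
                        SmallVectorImpl<Use *> &Queue) {
  for (Use &U : I.uses())
    if (Visited.insert(U.getUser()))
      Queue.push_back(&U);
}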
@ -3516,10 +3510,9 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
static void enqueueUsersInWorklist(Instruction &I,
SmallVectorImpl<Instruction *> &Worklist,
SmallPtrSet<Instruction *, 8> &Visited) {
for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
++UI)
if (Visited.insert(cast<Instruction>(*UI)))
Worklist.push_back(cast<Instruction>(*UI));
for (User *U : I.users())
if (Visited.insert(cast<Instruction>(U)))
Worklist.push_back(cast<Instruction>(U));
}
/// \brief Promote the allocas, using the best available technique.

@ -466,10 +466,10 @@ bool ConvertToScalarInfo::MergeInVectorType(VectorType *VInTy,
/// SawVec flag.
bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
Value* NonConstantIdx) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
for (User *U : V->users()) {
Instruction *UI = cast<Instruction>(U);
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
// Don't break volatile loads.
if (!LI->isSimple())
return false;

@ -481,7 +481,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
continue;
}
if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
// Storing the pointer, not into the value?
if (SI->getOperand(0) == V || !SI->isSimple()) return false;
// Don't touch MMX operations.

@ -492,7 +492,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
continue;
}
if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
if (BitCastInst *BCI = dyn_cast<BitCastInst>(UI)) {
if (!onlyUsedByLifetimeMarkers(BCI))
IsNotTrivial = true; // Can't be mem2reg'd.
if (!CanConvertToScalar(BCI, Offset, NonConstantIdx))

@ -500,7 +500,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
continue;
}
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UI)) {
// If this is a GEP with a variable indices, we can't handle it.
PointerType* PtrTy = dyn_cast<PointerType>(GEP->getPointerOperandType());
if (!PtrTy)

@ -532,7 +532,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
// If this is a constant sized memset of a constant value (e.g. 0) we can
// handle it.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
if (MemSetInst *MSI = dyn_cast<MemSetInst>(UI)) {
// Store to dynamic index.
if (NonConstantIdx)
return false;

@ -559,7 +559,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
// If this is a memcpy or memmove into or out of the whole allocation, we
// can handle it like a load or store of the scalar type.
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(UI)) {
// Store to dynamic index.
if (NonConstantIdx)
return false;

@ -572,7 +572,7 @@ bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset,
}
// If this is a lifetime intrinsic, we can handle it.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(User)) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(UI)) {
if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
II->getIntrinsicID() == Intrinsic::lifetime_end) {
continue;

@ -597,7 +597,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
uint64_t Offset,
Value* NonConstantIdx) {
while (!Ptr->use_empty()) {
Instruction *User = cast<Instruction>(Ptr->use_back());
Instruction *User = cast<Instruction>(Ptr->user_back());
if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
ConvertUsesToScalar(CI, NewAI, Offset, NonConstantIdx);

@ -1060,11 +1060,10 @@ public:
// Remember which alloca we're promoting (for isInstInList).
this->AI = AI;
if (MDNode *DebugNode = MDNode::getIfExists(AI->getContext(), AI)) {
for (Value::use_iterator UI = DebugNode->use_begin(),
E = DebugNode->use_end(); UI != E; ++UI)
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
for (User *U : DebugNode->users())
if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
DDIs.push_back(DDI);
else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(U))
DVIs.push_back(DVI);
}

@ -1142,9 +1141,8 @@ static bool isSafeSelectToSpeculate(SelectInst *SI, const DataLayout *DL) {
bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();
for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
UI != UE; ++UI) {
LoadInst *LI = dyn_cast<LoadInst>(*UI);
for (User *U : SI->users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
if (LI == 0 || !LI->isSimple()) return false;
// Both operands to the select need to be dereferencable, either absolutely

@ -1183,9 +1181,8 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
// TODO: Allow stores.
BasicBlock *BB = PN->getParent();
unsigned MaxAlign = 0;
for (Value::use_iterator UI = PN->use_begin(), UE = PN->use_end();
UI != UE; ++UI) {
LoadInst *LI = dyn_cast<LoadInst>(*UI);
for (User *U : PN->users()) {
LoadInst *LI = dyn_cast<LoadInst>(U);
if (LI == 0 || !LI->isSimple()) return false;
// For now we only allow loads in the same block as the PHI. This is a

@ -1243,10 +1240,7 @@ static bool isSafePHIToSpeculate(PHINode *PN, const DataLayout *DL) {
static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
SetVector<Instruction*, SmallVector<Instruction*, 4>,
SmallPtrSet<Instruction*, 4> > InstsToRewrite;
for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
UI != UE; ++UI) {
User *U = *UI;
for (User *U : AI->users()) {
if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
if (!LI->isSimple())
return false;

@ -1316,12 +1310,9 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) {
if (BitCastInst *BCI = dyn_cast<BitCastInst>(InstsToRewrite[i])) {
// This could only be a bitcast used by nothing but lifetime intrinsics.
for (BitCastInst::use_iterator I = BCI->use_begin(), E = BCI->use_end();
I != E;) {
Use &U = I.getUse();
++I;
cast<Instruction>(U.getUser())->eraseFromParent();
}
for (BitCastInst::user_iterator I = BCI->user_begin(), E = BCI->user_end();
I != E;)
cast<Instruction>(*I++)->eraseFromParent();
BCI->eraseFromParent();
continue;
}

@ -1330,7 +1321,7 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
// Selects in InstsToRewrite only have load uses. Rewrite each as two
// loads with a new select.
while (!SI->use_empty()) {
LoadInst *LI = cast<LoadInst>(SI->use_back());
LoadInst *LI = cast<LoadInst>(SI->user_back());
IRBuilder<> Builder(LI);
LoadInst *TrueLoad =

@ -1371,13 +1362,13 @@ static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const DataLayout *DL) {
// Get the TBAA tag and alignment to use from one of the loads. It doesn't
// matter which one we get and if any differ, it doesn't matter.
LoadInst *SomeLoad = cast<LoadInst>(PN->use_back());
LoadInst *SomeLoad = cast<LoadInst>(PN->user_back());
MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
unsigned Align = SomeLoad->getAlignment();
// Rewrite all loads of the PN to use the new PHI.
while (!PN->use_empty()) {
LoadInst *LI = cast<LoadInst>(PN->use_back());
LoadInst *LI = cast<LoadInst>(PN->user_back());
LI->replaceAllUsesWith(NewPN);
LI->eraseFromParent();
}

@ -1437,9 +1428,8 @@ bool SROA::performPromotion(Function &F) {
AllocaInst *AI = Allocas[i];
// Build list of instructions to promote.
for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
UI != E; ++UI)
Insts.push_back(cast<Instruction>(*UI));
for (User *U : AI->users())
Insts.push_back(cast<Instruction>(U));
AllocaPromoter(Insts, SSA, &DIB).run(AI, Insts);
Insts.clear();
}

@ -1602,8 +1592,8 @@ void SROA::DeleteDeadInstructions() {
/// referenced by this instruction.
void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
AllocaInfo &Info) {
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
Instruction *User = cast<Instruction>(*UI);
for (Use &U : I->uses()) {
Instruction *User = cast<Instruction>(U.getUser());
if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
isSafeForScalarRepl(BC, Offset, Info);

@ -1620,7 +1610,7 @@ void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
return MarkUnsafe(Info, User);
isSafeMemAccess(Offset, Length->getZExtValue(), 0,
UI.getOperandNo() == 0, Info, MI,
|
||||
U.getOperandNo() == 0, Info, MI,
|
||||
true /*AllowWholeAccess*/);
|
||||
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
|
||||
if (!LI->isSimple())
|
||||
|
@ -1669,39 +1659,39 @@ void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
|
|||
if (!Info.CheckedPHIs.insert(PN))
|
||||
return;
|
||||
|
||||
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
|
||||
Instruction *User = cast<Instruction>(*UI);
|
||||
for (User *U : I->users()) {
|
||||
Instruction *UI = cast<Instruction>(U);
|
||||
|
||||
if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
|
||||
if (BitCastInst *BC = dyn_cast<BitCastInst>(UI)) {
|
||||
isSafePHISelectUseForScalarRepl(BC, Offset, Info);
|
||||
} else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
|
||||
} else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
|
||||
// Only allow "bitcast" GEPs for simplicity. We could generalize this,
|
||||
// but would have to prove that we're staying inside of an element being
|
||||
// promoted.
|
||||
if (!GEPI->hasAllZeroIndices())
|
||||
return MarkUnsafe(Info, User);
|
||||
return MarkUnsafe(Info, UI);
|
||||
isSafePHISelectUseForScalarRepl(GEPI, Offset, Info);
|
||||
} else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
|
||||
} else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
|
||||
if (!LI->isSimple())
|
||||
return MarkUnsafe(Info, User);
|
||||
return MarkUnsafe(Info, UI);
|
||||
Type *LIType = LI->getType();
|
||||
isSafeMemAccess(Offset, DL->getTypeAllocSize(LIType),
|
||||
LIType, false, Info, LI, false /*AllowWholeAccess*/);
|
||||
Info.hasALoadOrStore = true;
|
||||
|
||||
} else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
|
||||
} else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
|
||||
// Store is ok if storing INTO the pointer, not storing the pointer
|
||||
if (!SI->isSimple() || SI->getOperand(0) == I)
|
||||
return MarkUnsafe(Info, User);
|
||||
return MarkUnsafe(Info, UI);
|
||||
|
||||
Type *SIType = SI->getOperand(0)->getType();
|
||||
isSafeMemAccess(Offset, DL->getTypeAllocSize(SIType),
|
||||
SIType, true, Info, SI, false /*AllowWholeAccess*/);
|
||||
Info.hasALoadOrStore = true;
|
||||
} else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
|
||||
isSafePHISelectUseForScalarRepl(User, Offset, Info);
|
||||
} else if (isa<PHINode>(UI) || isa<SelectInst>(UI)) {
|
||||
isSafePHISelectUseForScalarRepl(UI, Offset, Info);
|
||||
} else {
|
||||
return MarkUnsafe(Info, User);
|
||||
return MarkUnsafe(Info, UI);
|
||||
}
|
||||
if (Info.isUnsafe) return;
|
||||
}
|
||||
|
@ -1871,8 +1861,8 @@ bool SROA::TypeHasComponent(Type *T, uint64_t Offset, uint64_t Size) {
|
|||
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
|
||||
SmallVectorImpl<AllocaInst *> &NewElts) {
|
||||
for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E;) {
|
||||
Use &TheUse = UI.getUse();
|
||||
Instruction *User = cast<Instruction>(*UI++);
|
||||
Use &TheUse = *UI++;
|
||||
Instruction *User = cast<Instruction>(TheUse.getUser());
|
||||
|
||||
if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
|
||||
RewriteBitCast(BC, AI, Offset, NewElts);
|
||||
|
|
|
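The ScalarReplAggregates hunks above repeat one mechanical pattern: a manual Value::use_iterator loop that only ever inspected the user becomes a range-based walk over users(). A minimal standalone sketch of the new form, assuming the post-patch llvm/IR headers; countLoadUsers is a hypothetical helper, not code from this patch:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Count the loads among V's users, written against the new accessor.
static unsigned countLoadUsers(Value *V) {
  unsigned N = 0;
  for (User *U : V->users())   // was: for (Value::use_iterator UI = V->use_begin(), ...)
    if (isa<LoadInst>(U))      // was: isa<LoadInst>(*UI)
      ++N;
  return N;
}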
@@ -76,15 +76,14 @@ bool Sinking::AllUsesDominatedByBlock(Instruction *Inst,
 // This may leave a referencing dbg_value in the original block, before
 // the definition of the vreg. Dwarf generator handles this although the
 // user might not get the right info at runtime.
-for (Value::use_iterator I = Inst->use_begin(),
-E = Inst->use_end(); I != E; ++I) {
+for (Use &U : Inst->uses()) {
 // Determine the block of the use.
-Instruction *UseInst = cast<Instruction>(*I);
+Instruction *UseInst = cast<Instruction>(U.getUser());
 BasicBlock *UseBlock = UseInst->getParent();
 if (PHINode *PN = dyn_cast<PHINode>(UseInst)) {
 // PHI nodes use the operand in the predecessor block, not the block with
 // the PHI.
-unsigned Num = PHINode::getIncomingValueNumForOperand(I.getOperandNo());
+unsigned Num = PHINode::getIncomingValueNumForOperand(U.getOperandNo());
 UseBlock = PN->getIncomingBlock(Num);
 }
 // Check that it dominates.
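AllUsesDominatedByBlock above needs the operand slot, not just the user, so it switches to uses() and recovers the user through Use::getUser(); the Use itself then feeds PHINode::getIncomingBlock. A sketch of that shape under the same header assumptions; blockOfUse is a hypothetical helper:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Use.h"
using namespace llvm;

// The block where a Use takes effect: PHI operands count in the matching
// predecessor rather than in the PHI's own block.
static BasicBlock *blockOfUse(Use &U) {
  Instruction *UserInst = cast<Instruction>(U.getUser());
  if (PHINode *PN = dyn_cast<PHINode>(UserInst))
    return PN->getIncomingBlock(U);
  return UserInst->getParent();
}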
@@ -325,16 +325,10 @@ Value *StructurizeCFG::invert(Value *Condition) {
 if (Instruction *Inst = dyn_cast<Instruction>(Condition)) {
 // Third: Check all the users for an invert
 BasicBlock *Parent = Inst->getParent();
-for (Value::use_iterator I = Condition->use_begin(),
-E = Condition->use_end(); I != E; ++I) {
-
-Instruction *User = dyn_cast<Instruction>(*I);
-if (!User || User->getParent() != Parent)
-continue;
-
-if (match(*I, m_Not(m_Specific(Condition))))
-return *I;
-}
+for (User *U : Condition->users())
+if (Instruction *I = dyn_cast<Instruction>(U))
+if (I->getParent() == Parent && match(I, m_Not(m_Specific(Condition))))
+return I;

 // Last option: Create a new instruction
 return BinaryOperator::CreateNot(Condition, "", Parent->getTerminator());

@@ -834,16 +828,14 @@ void StructurizeCFG::rebuildSSA() {
 II != IE; ++II) {

 bool Initialized = false;
-for (Use *I = &II->use_begin().getUse(), *Next; I; I = Next) {
-
-Next = I->getNext();
-
-Instruction *User = cast<Instruction>(I->getUser());
+for (auto I = II->use_begin(), E = II->use_end(); I != E;) {
+Use &U = *I++;
+Instruction *User = cast<Instruction>(U.getUser());
 if (User->getParent() == BB) {
 continue;

 } else if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
-if (UserPN->getIncomingBlock(*I) == BB)
+if (UserPN->getIncomingBlock(U) == BB)
 continue;
 }

@@ -857,7 +849,7 @@ void StructurizeCFG::rebuildSSA() {
 Updater.AddAvailableValue(BB, II);
 Initialized = true;
 }
-Updater.RewriteUseAfterInsertions(*I);
+Updater.RewriteUseAfterInsertions(U);
 }
 }
 }
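The rebuildSSA hunk above (like the bitcast cleanup in tryToMakeAllocaBePromotable earlier) shows the idiom for mutating while walking the chain: advance the iterator before touching the current element, so an erase cannot invalidate the traversal. Reduced to its core, with eraseAllUsers as a hypothetical helper:

#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Delete every instruction that uses V; the post-increment inside the
// dereference keeps the iterator valid as users are unlinked.
static void eraseAllUsers(Value *V) {
  for (auto I = V->user_begin(), E = V->user_end(); I != E;)
    cast<Instruction>(*I++)->eraseFromParent();
}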
@@ -380,13 +380,13 @@ Value *TailCallElim::CanTransformAccumulatorRecursion(Instruction *I,
 return 0;

 // The only user of this instruction we allow is a single return instruction.
-if (!I->hasOneUse() || !isa<ReturnInst>(I->use_back()))
+if (!I->hasOneUse() || !isa<ReturnInst>(I->user_back()))
 return 0;

 // Ok, now we have to check all of the other return instructions in this
 // function. If they return non-constants or differing values, then we cannot
 // transform the function safely.
-return getCommonReturnValue(cast<ReturnInst>(I->use_back()), CI);
+return getCommonReturnValue(cast<ReturnInst>(I->user_back()), CI);
 }

 static Instruction *FirstNonDbg(BasicBlock::iterator I) {
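CanTransformAccumulatorRecursion relies on the pairing just above: once hasOneUse() holds, user_back() names the unique user directly, with no iterator ceremony. A sketch; soleReturnUser is a hypothetical helper:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// If I's single user is a return instruction, produce it; otherwise 0
// (this era of the codebase predates nullptr).
static ReturnInst *soleReturnUser(Instruction *I) {
  if (I->hasOneUse())
    return dyn_cast<ReturnInst>(I->user_back());
  return 0;
}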
@@ -171,9 +171,8 @@ void CodeExtractor::findInputsOutputs(ValueSet &Inputs,
 if (definedInCaller(Blocks, *OI))
 Inputs.insert(*OI);

-for (Value::use_iterator UI = II->use_begin(), UE = II->use_end();
-UI != UE; ++UI)
-if (!definedInRegion(Blocks, *UI)) {
+for (User *U : II->users())
+if (!definedInRegion(Blocks, U)) {
 Outputs.insert(II);
 break;
 }

@@ -369,7 +368,7 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
 } else
 RewriteVal = AI++;

-std::vector<User*> Users(inputs[i]->use_begin(), inputs[i]->use_end());
+std::vector<User*> Users(inputs[i]->user_begin(), inputs[i]->user_end());
 for (std::vector<User*>::iterator use = Users.begin(), useE = Users.end();
 use != useE; ++use)
 if (Instruction* inst = dyn_cast<Instruction>(*use))

@@ -389,7 +388,7 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
 // Rewrite branches to basic blocks outside of the loop to new dummy blocks
 // within the new function. This must be done before we lose track of which
 // blocks were originally in the code region.
-std::vector<User*> Users(header->use_begin(), header->use_end());
+std::vector<User*> Users(header->user_begin(), header->user_end());
 for (unsigned i = 0, e = Users.size(); i != e; ++i)
 // The BasicBlock which contains the branch is not in the region
 // modify the branch target to a new block

@@ -405,13 +404,12 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
 /// that uses the value within the basic block, and return the predecessor
 /// block associated with that use, or return 0 if none is found.
 static BasicBlock* FindPhiPredForUseInBlock(Value* Used, BasicBlock* BB) {
-for (Value::use_iterator UI = Used->use_begin(),
-UE = Used->use_end(); UI != UE; ++UI) {
-PHINode *P = dyn_cast<PHINode>(*UI);
+for (Use &U : Used->uses()) {
+PHINode *P = dyn_cast<PHINode>(U.getUser());
 if (P && P->getParent() == BB)
-return P->getIncomingBlock(UI);
+return P->getIncomingBlock(U);
 }

 return 0;
 }

@@ -502,7 +500,7 @@ emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
 LoadInst *load = new LoadInst(Output, outputs[i]->getName()+".reload");
 Reloads.push_back(load);
 codeReplacer->getInstList().push_back(load);
-std::vector<User*> Users(outputs[i]->use_begin(), outputs[i]->use_end());
+std::vector<User*> Users(outputs[i]->user_begin(), outputs[i]->user_end());
 for (unsigned u = 0, e = Users.size(); u != e; ++u) {
 Instruction *inst = cast<Instruction>(Users[u]);
 if (!Blocks.count(inst->getParent()))

@@ -41,7 +41,7 @@ AllocaInst *llvm::DemoteRegToStack(Instruction &I, bool VolatileLoads,

 // Change all of the users of the instruction to read from the stack slot.
 while (!I.use_empty()) {
-Instruction *U = cast<Instruction>(I.use_back());
+Instruction *U = cast<Instruction>(I.user_back());
 if (PHINode *PN = dyn_cast<PHINode>(U)) {
 // If this is a PHI node, we can't insert a load of the value before the
 // use. Instead insert the load in the predecessor block corresponding
@@ -35,9 +35,8 @@ bool llvm::isSafeToDestroyConstant(const Constant *C) {
 if (isa<GlobalValue>(C))
 return false;

-for (Value::const_use_iterator UI = C->use_begin(), E = C->use_end(); UI != E;
-++UI)
-if (const Constant *CU = dyn_cast<Constant>(*UI)) {
+for (const User *U : C->users())
+if (const Constant *CU = dyn_cast<Constant>(U)) {
 if (!isSafeToDestroyConstant(CU))
 return false;
 } else

@@ -47,10 +46,9 @@ bool llvm::isSafeToDestroyConstant(const Constant *C) {

 static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
 SmallPtrSet<const PHINode *, 16> &PhiUsers) {
-for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
-++UI) {
-const User *U = *UI;
-if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
+for (const Use &U : V->uses()) {
+const User *UR = U.getUser();
+if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(UR)) {
 GS.HasNonInstructionUser = true;

 // If the result of the constantexpr isn't pointer type, then we won't

@@ -60,7 +58,7 @@ static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,

 if (analyzeGlobalAux(CE, GS, PhiUsers))
 return true;
-} else if (const Instruction *I = dyn_cast<Instruction>(U)) {
+} else if (const Instruction *I = dyn_cast<Instruction>(UR)) {
 if (!GS.HasMultipleAccessingFunctions) {
 const Function *F = I->getParent()->getParent();
 if (GS.AccessingFunction == 0)

@@ -150,13 +148,13 @@ static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
 return true;
 GS.StoredType = GlobalStatus::Stored;
 } else if (ImmutableCallSite C = I) {
-if (!C.isCallee(UI))
+if (!C.isCallee(&U))
 return true;
 GS.IsLoaded = true;
 } else {
 return true; // Any other non-load instruction might take address!
 }
-} else if (const Constant *C = dyn_cast<Constant>(U)) {
+} else if (const Constant *C = dyn_cast<Constant>(UR)) {
 GS.HasNonInstructionUser = true;
 // We might have a dead and dangling constant hanging off of here.
 if (!isSafeToDestroyConstant(C))
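The analyzeGlobalAux hunk shows where the Use-based walk pays off for call sites: handing the Use's address to isCallee distinguishes the callee operand from an argument that merely carries the pointer. Sketched under the same assumptions; useIsDirectCall is a hypothetical helper, and the CallSite header still lived under llvm/Support at this point:

#include "llvm/IR/Use.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

// True only when U occupies the callee slot of a call or invoke, i.e. the
// value is being called rather than escaping as an argument.
static bool useIsDirectCall(const Use &U) {
  ImmutableCallSite CS(U.getUser());
  return CS && CS.isCallee(&U);
}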
@@ -401,9 +401,8 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
 // isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
 // intrinsic.
 static bool isUsedByLifetimeMarker(Value *V) {
-for (Value::use_iterator UI = V->use_begin(), UE = V->use_end(); UI != UE;
-++UI) {
-if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*UI)) {
+for (User *U : V->users()) {
+if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
 switch (II->getIntrinsicID()) {
 default: break;
 case Intrinsic::lifetime_start:

@@ -423,11 +422,10 @@ static bool hasLifetimeMarkers(AllocaInst *AI) {
 return isUsedByLifetimeMarker(AI);

 // Do a scan to find all the casts to i8*.
-for (Value::use_iterator I = AI->use_begin(), E = AI->use_end(); I != E;
-++I) {
-if (I->getType() != Int8PtrTy) continue;
-if (I->stripPointerCasts() != AI) continue;
-if (isUsedByLifetimeMarker(*I))
+for (User *U : AI->users()) {
+if (U->getType() != Int8PtrTy) continue;
+if (U->stripPointerCasts() != AI) continue;
+if (isUsedByLifetimeMarker(U))
 return true;
 }
 return false;
@@ -65,15 +65,14 @@ static bool processInstruction(Loop &L, Instruction &Inst, DominatorTree &DT,

 BasicBlock *InstBB = Inst.getParent();

-for (Value::use_iterator UI = Inst.use_begin(), E = Inst.use_end(); UI != E;
-++UI) {
-User *U = *UI;
-BasicBlock *UserBB = cast<Instruction>(U)->getParent();
-if (PHINode *PN = dyn_cast<PHINode>(U))
-UserBB = PN->getIncomingBlock(UI);
+for (Use &U : Inst.uses()) {
+Instruction *User = cast<Instruction>(U.getUser());
+BasicBlock *UserBB = User->getParent();
+if (PHINode *PN = dyn_cast<PHINode>(User))
+UserBB = PN->getIncomingBlock(U);

 if (InstBB != UserBB && !L.contains(UserBB))
-UsesToRewrite.push_back(&UI.getUse());
+UsesToRewrite.push_back(&U);
 }

 // If there are no uses outside the loop, exit with no change.

@@ -208,8 +207,8 @@ bool llvm::formLCSSA(Loop &L, DominatorTree &DT, ScalarEvolution *SE) {
 // Reject two common cases fast: instructions with no uses (like stores)
 // and instructions with one use that is in the same block as this.
 if (I->use_empty() ||
-(I->hasOneUse() && I->use_back()->getParent() == BB &&
-!isa<PHINode>(I->use_back())))
+(I->hasOneUse() && I->user_back()->getParent() == BB &&
+!isa<PHINode>(I->user_back())))
 continue;

 Changed |= processInstruction(L, *I, DT, ExitBlocks, PredCache);
@@ -354,8 +354,8 @@ llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
 /// true when there are no uses or multiple uses that all refer to the same
 /// value.
 static bool areAllUsesEqual(Instruction *I) {
-Value::use_iterator UI = I->use_begin();
-Value::use_iterator UE = I->use_end();
+Value::user_iterator UI = I->user_begin();
+Value::user_iterator UE = I->user_end();
 if (UI == UE)
 return true;

@@ -376,7 +376,7 @@ bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
 const TargetLibraryInfo *TLI) {
 SmallPtrSet<Instruction*, 4> Visited;
 for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
-I = cast<Instruction>(*I->use_begin())) {
+I = cast<Instruction>(*I->user_begin())) {
 if (I->use_empty())
 return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

@@ -752,10 +752,9 @@ bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
 if (!Succ->getSinglePredecessor()) {
 BasicBlock::iterator BBI = BB->begin();
 while (isa<PHINode>(*BBI)) {
-for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end();
-UI != E; ++UI) {
-if (PHINode* PN = dyn_cast<PHINode>(*UI)) {
-if (PN->getIncomingBlock(UI) != BB)
+for (Use &U : BBI->uses()) {
+if (PHINode* PN = dyn_cast<PHINode>(U.getUser())) {
+if (PN->getIncomingBlock(U) != BB)
 return false;
 } else {
 return false;

@@ -1056,11 +1055,10 @@ bool llvm::LowerDbgDeclare(Function &F) {
 // We only remove the dbg.declare intrinsic if all uses are
 // converted to dbg.value intrinsics.
 bool RemoveDDI = true;
-for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
-UI != E; ++UI)
-if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
+for (User *U : AI->users())
+if (StoreInst *SI = dyn_cast<StoreInst>(U))
 ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
-else if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
+else if (LoadInst *LI = dyn_cast<LoadInst>(U))
 ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
 else
 RemoveDDI = false;

@@ -1075,9 +1073,8 @@ bool llvm::LowerDbgDeclare(Function &F) {
 /// alloca 'V', if any.
 DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
 if (MDNode *DebugNode = MDNode::getIfExists(V->getContext(), V))
-for (Value::use_iterator UI = DebugNode->use_begin(),
-E = DebugNode->use_end(); UI != E; ++UI)
-if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
+for (User *U : DebugNode->users())
+if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
 return DDI;

 return 0;
@@ -325,8 +325,8 @@ splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {
 Instruction *Inst = II;
 if (Inst->use_empty()) continue;
 if (Inst->hasOneUse() &&
-cast<Instruction>(Inst->use_back())->getParent() == BB &&
-!isa<PHINode>(Inst->use_back())) continue;
+cast<Instruction>(Inst->user_back())->getParent() == BB &&
+!isa<PHINode>(Inst->user_back())) continue;

 // If this is an alloca in the entry block, it's not a real register
 // value.

@@ -336,11 +336,10 @@ splitLiveRangesLiveAcrossInvokes(SmallVectorImpl<InvokeInst*> &Invokes) {

 // Avoid iterator invalidation by copying users to a temporary vector.
 SmallVector<Instruction*,16> Users;
-for (Value::use_iterator UI = Inst->use_begin(), E = Inst->use_end();
-UI != E; ++UI) {
-Instruction *User = cast<Instruction>(*UI);
-if (User->getParent() != BB || isa<PHINode>(User))
-Users.push_back(User);
+for (User *U : Inst->users()) {
+Instruction *UI = cast<Instruction>(U);
+if (UI->getParent() != BB || isa<PHINode>(UI))
+Users.push_back(UI);
 }

 // Scan all of the uses and see if the live range is live across an unwind
@@ -61,9 +61,7 @@ bool llvm::isAllocaPromotable(const AllocaInst *AI) {
 // assignments to subsections of the memory unit.

 // Only allow direct and non-volatile loads and stores...
-for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
-UI != UE; ++UI) { // Loop over all of the uses of the alloca
-const User *U = *UI;
+for (const User *U : AI->users()) {
 if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
 // Note that atomic loads can be transformed; atomic semantics do
 // not have any meaning for a local alloca.

@@ -131,8 +129,7 @@ struct AllocaInfo {
 // As we scan the uses of the alloca instruction, keep track of stores,
 // and decide whether all of the loads and stores to the alloca are within
 // the same basic block.
-for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
-UI != E;) {
+for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
 Instruction *User = cast<Instruction>(*UI++);

 if (StoreInst *SI = dyn_cast<StoreInst>(User)) {

@@ -317,8 +314,7 @@ static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
 // Knowing that this alloca is promotable, we know that it's safe to kill all
 // instructions except for load and store.

-for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
-UI != UE;) {
+for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
 Instruction *I = cast<Instruction>(*UI);
 ++UI;
 if (isa<LoadInst>(I) || isa<StoreInst>(I))

@@ -328,10 +324,9 @@ static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
 // The only users of this bitcast/GEP instruction are lifetime intrinsics.
 // Follow the use/def chain to erase them now instead of leaving it for
 // dead code elimination later.
-for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
-UI != UE;) {
-Instruction *Inst = cast<Instruction>(*UI);
-++UI;
+for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
+Instruction *Inst = cast<Instruction>(*UUI);
+++UUI;
 Inst->eraseFromParent();
 }
 }

@@ -359,7 +354,7 @@ static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
 // Clear out UsingBlocks. We will reconstruct it here if needed.
 Info.UsingBlocks.clear();

-for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
+for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
 Instruction *UserInst = cast<Instruction>(*UI++);
 if (!isa<LoadInst>(UserInst)) {
 assert(UserInst == OnlyStore && "Should only have load/stores");

@@ -456,9 +451,8 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
 typedef SmallVector<std::pair<unsigned, StoreInst *>, 64> StoresByIndexTy;
 StoresByIndexTy StoresByIndex;

-for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;
-++UI)
-if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
+for (User *U : AI->users())
+if (StoreInst *SI = dyn_cast<StoreInst>(U))
 StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));

 // Sort the stores by their index, making it efficient to do a lookup with a

@@ -467,7 +461,7 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,

 // Walk all of the loads from this alloca, replacing them with the nearest
 // store above them, if any.
-for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
+for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
 LoadInst *LI = dyn_cast<LoadInst>(*UI++);
 if (!LI)
 continue;

@@ -495,7 +489,7 @@ static void promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,

 // Remove the (now dead) stores and alloca.
 while (!AI->use_empty()) {
-StoreInst *SI = cast<StoreInst>(AI->use_back());
+StoreInst *SI = cast<StoreInst>(AI->user_back());
 // Record debuginfo for the store before removing it.
 if (DbgDeclareInst *DDI = Info.DbgDeclare) {
 DIBuilder DIB(*AI->getParent()->getParent()->getParent());
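promoteSingleBlockAlloca above drains the remaining users from the back of the list; each eraseFromParent() shortens the use list, so the while loop terminates without any iterator at all. The skeleton of that pattern; deleteDeadStores is a hypothetical helper and assumes, as the surrounding pass guarantees, that every remaining user is a store:

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Remove all (store) users of a dead alloca, newest first.
static void deleteDeadStores(AllocaInst *AI) {
  while (!AI->use_empty()) {
    StoreInst *SI = cast<StoreInst>(AI->user_back());
    SI->eraseFromParent(); // unlinks SI's operands, shrinking AI's use list
  }
}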
@@ -1590,10 +1590,9 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {

 // We can only support instructions that do not define values that are
 // live outside of the current basic block.
-for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end();
-UI != E; ++UI) {
-Instruction *U = cast<Instruction>(*UI);
-if (U->getParent() != BB || isa<PHINode>(U)) return false;
+for (User *U : BBI->users()) {
+Instruction *UI = cast<Instruction>(U);
+if (UI->getParent() != BB || isa<PHINode>(UI)) return false;
 }

 // Looks ok, continue checking.

@@ -2016,7 +2015,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
 // register pressure or inhibit out-of-order execution.
 Instruction *BonusInst = 0;
 if (&*FrontIt != Cond &&
-FrontIt->hasOneUse() && *FrontIt->use_begin() == Cond &&
+FrontIt->hasOneUse() && FrontIt->user_back() == Cond &&
 isSafeToSpeculativelyExecute(FrontIt)) {
 BonusInst = &*FrontIt;
 ++FrontIt;

@@ -2095,7 +2094,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
 // instructions that are used by the terminator's condition because it
 // exposes more merging opportunities.
 bool UsedByBranch = (BonusInst && BonusInst->hasOneUse() &&
-*BonusInst->use_begin() == Cond);
+BonusInst->user_back() == Cond);

 if (BonusInst && !UsedByBranch) {
 // Collect the values used by the bonus inst

@@ -2689,7 +2688,7 @@ static bool TryToSimplifyUncondBranchWithICmpInIt(
 // The use of the icmp has to be in the 'end' block, by the only PHI node in
 // the block.
 BasicBlock *SuccBlock = BB->getTerminator()->getSuccessor(0);
-PHINode *PHIUse = dyn_cast<PHINode>(ICI->use_back());
+PHINode *PHIUse = dyn_cast<PHINode>(ICI->user_back());
 if (PHIUse == 0 || PHIUse != &SuccBlock->front() ||
 isa<PHINode>(++BasicBlock::iterator(PHIUse)))
 return false;

@@ -3807,8 +3806,8 @@ static bool SwitchToLookupTable(SwitchInst *SI,

 // If the result is used to return immediately from the function, we want to
 // do that right here.
-if (PHI->hasOneUse() && isa<ReturnInst>(*PHI->use_begin()) &&
-*PHI->use_begin() == CommonDest->getFirstNonPHIOrDbg()) {
+if (PHI->hasOneUse() && isa<ReturnInst>(*PHI->user_begin()) &&
+PHI->user_back() == CommonDest->getFirstNonPHIOrDbg()) {
 Builder.CreateRet(Result);
 ReturnedEarly = true;
 break;

@@ -4043,7 +4042,7 @@ static bool passingValueIsAlwaysUndefined(Value *V, Instruction *I) {

 if (C->isNullValue()) {
 // Only look at the first use, avoid hurting compile time with long uselists
-User *Use = *I->use_begin();
+User *Use = *I->user_begin();

 // Now make sure that there are no instructions in between that can alter
 // control flow (eg. calls)
@@ -285,15 +285,14 @@ Instruction *SimplifyIndvar::splitOverflowIntrinsic(Instruction *IVUser,
 // Find a branch guarded by the overflow check.
 BranchInst *Branch = 0;
 Instruction *AddVal = 0;
-for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
-UI != E; ++UI) {
-if (ExtractValueInst *ExtractInst = dyn_cast<ExtractValueInst>(*UI)) {
+for (User *U : II->users()) {
+if (ExtractValueInst *ExtractInst = dyn_cast<ExtractValueInst>(U)) {
 if (ExtractInst->getNumIndices() != 1)
 continue;
 if (ExtractInst->getIndices()[0] == 0)
 AddVal = ExtractInst;
 else if (ExtractInst->getIndices()[0] == 1 && ExtractInst->hasOneUse())
-Branch = dyn_cast<BranchInst>(ExtractInst->use_back());
+Branch = dyn_cast<BranchInst>(ExtractInst->user_back());
 }
 }
 if (!AddVal || !Branch)

@@ -305,12 +304,11 @@ Instruction *SimplifyIndvar::splitOverflowIntrinsic(Instruction *IVUser,

 // Check if all users of the add are provably NSW.
 bool AllNSW = true;
-for (Value::use_iterator UI = AddVal->use_begin(), E = AddVal->use_end();
-UI != E; ++UI) {
-if (Instruction *UseInst = dyn_cast<Instruction>(*UI)) {
+for (Use &U : AddVal->uses()) {
+if (Instruction *UseInst = dyn_cast<Instruction>(U.getUser())) {
 BasicBlock *UseBB = UseInst->getParent();
 if (PHINode *PHI = dyn_cast<PHINode>(UseInst))
-UseBB = PHI->getIncomingBlock(UI);
+UseBB = PHI->getIncomingBlock(U);
 if (!DT->dominates(ContinueBB, UseBB)) {
 AllNSW = false;
 break;

@@ -343,16 +341,15 @@ static void pushIVUsers(
 SmallPtrSet<Instruction*,16> &Simplified,
 SmallVectorImpl< std::pair<Instruction*,Instruction*> > &SimpleIVUsers) {

-for (Value::use_iterator UI = Def->use_begin(), E = Def->use_end();
-UI != E; ++UI) {
-Instruction *User = cast<Instruction>(*UI);
+for (User *U : Def->users()) {
+Instruction *UI = cast<Instruction>(U);

 // Avoid infinite or exponential worklist processing.
 // Also ensure unique worklist users.
 // If Def is a LoopPhi, it may not be in the Simplified set, so check for
 // self edges first.
-if (User != Def && Simplified.insert(User))
-SimpleIVUsers.push_back(std::make_pair(User, Def));
+if (UI != Def && Simplified.insert(UI))
+SimpleIVUsers.push_back(std::make_pair(UI, Def));
 }
 }
@@ -68,9 +68,8 @@ namespace {
 if (!I->use_empty())
 if (Value *V = SimplifyInstruction(I, DL, TLI, DT)) {
 // Mark all uses for resimplification next time round the loop.
-for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
-UI != UE; ++UI)
-Next->insert(cast<Instruction>(*UI));
+for (User *U : I->users())
+Next->insert(cast<Instruction>(U));
 I->replaceAllUsesWith(V);
 ++NumSimplified;
 Changed = true;
@@ -88,9 +88,8 @@ public:
 /// isOnlyUsedInZeroEqualityComparison - Return true if it only matters that the
 /// value is equal or not-equal to zero.
 static bool isOnlyUsedInZeroEqualityComparison(Value *V) {
-for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
-UI != E; ++UI) {
-if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+for (User *U : V->users()) {
+if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
 if (IC->isEquality())
 if (Constant *C = dyn_cast<Constant>(IC->getOperand(1)))
 if (C->isNullValue())

@@ -104,9 +103,8 @@ static bool isOnlyUsedInZeroEqualityComparison(Value *V) {
 /// isOnlyUsedInEqualityComparison - Return true if it is only used in equality
 /// comparisons with With.
 static bool isOnlyUsedInEqualityComparison(Value *V, Value *With) {
-for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
-UI != E; ++UI) {
-if (ICmpInst *IC = dyn_cast<ICmpInst>(*UI))
+for (User *U : V->users()) {
+if (ICmpInst *IC = dyn_cast<ICmpInst>(U))
 if (IC->isEquality() && IC->getOperand(1) == With)
 continue;
 // Unknown instruction.

@@ -936,8 +934,7 @@ struct StrStrOpt : public LibCallOptimization {
 StrLen, B, DL, TLI);
 if (!StrNCmp)
 return 0;
-for (Value::use_iterator UI = CI->use_begin(), UE = CI->use_end();
-UI != UE; ) {
+for (auto UI = CI->user_begin(), UE = CI->user_end(); UI != UE;) {
 ICmpInst *Old = cast<ICmpInst>(*UI++);
 Value *Cmp = B.CreateICmp(Old->getPredicate(), StrNCmp,
 ConstantInt::getNullValue(StrNCmp->getType()),

@@ -1110,9 +1107,8 @@ struct UnaryDoubleFPOpt : public LibCallOptimization {

 if (CheckRetType) {
 // Check if all the uses for function like 'sin' are converted to float.
-for (Value::use_iterator UseI = CI->use_begin(); UseI != CI->use_end();
-++UseI) {
-FPTruncInst *Cast = dyn_cast<FPTruncInst>(*UseI);
+for (User *U : CI->users()) {
+FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
 if (Cast == 0 || !Cast->getType()->isFloatTy())
 return 0;
 }

@@ -1147,9 +1143,8 @@ struct BinaryDoubleFPOpt : public LibCallOptimization {
 if (CheckRetType) {
 // Check if all the uses for function like 'fmin/fmax' are converted to
 // float.
-for (Value::use_iterator UseI = CI->use_begin(); UseI != CI->use_end();
-++UseI) {
-FPTruncInst *Cast = dyn_cast<FPTruncInst>(*UseI);
+for (User *U : CI->users()) {
+FPTruncInst *Cast = dyn_cast<FPTruncInst>(U);
 if (Cast == 0 || !Cast->getType()->isFloatTy())
 return 0;
 }

@@ -1361,9 +1356,8 @@ struct SinCosPiOpt : public LibCallOptimization {
 // Look for all compatible sinpi, cospi and sincospi calls with the same
 // argument. If there are enough (in some sense) we can make the
 // substitution.
-for (Value::use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
-UI != UE; ++UI)
-classifyArgUse(*UI, CI->getParent(), IsFloat, SinCalls, CosCalls,
+for (User *U : Arg->users())
+classifyArgUse(U, CI->getParent(), IsFloat, SinCalls, CosCalls,
 SinCosCalls);

 // It's only worthwhile if both sinpi and cospi are actually used.
@@ -1318,13 +1318,15 @@ namespace {

 // For each possible pairing for this variable, look at the uses of
 // the first value...
-for (Value::use_iterator I = P.first->use_begin(),
-E = P.first->use_end(); I != E; ++I) {
-if (isa<LoadInst>(*I)) {
+for (Value::user_iterator I = P.first->user_begin(),
+E = P.first->user_end();
+I != E; ++I) {
+User *UI = *I;
+if (isa<LoadInst>(UI)) {
 // A pair cannot be connected to a load because the load only takes one
 // operand (the address) and it is a scalar even after vectorization.
 continue;
-} else if ((SI = dyn_cast<StoreInst>(*I)) &&
+} else if ((SI = dyn_cast<StoreInst>(UI)) &&
 P.first == SI->getPointerOperand()) {
 // Similarly, a pair cannot be connected to a store through its
 // pointer operand.

@@ -1333,22 +1335,21 @@ namespace {

 // For each use of the first variable, look for uses of the second
 // variable...
-for (Value::use_iterator J = P.second->use_begin(),
-E2 = P.second->use_end(); J != E2; ++J) {
-if ((SJ = dyn_cast<StoreInst>(*J)) &&
+for (User *UJ : P.second->users()) {
+if ((SJ = dyn_cast<StoreInst>(UJ)) &&
 P.second == SJ->getPointerOperand())
 continue;

 // Look for <I, J>:
-if (CandidatePairsSet.count(ValuePair(*I, *J))) {
-VPPair VP(P, ValuePair(*I, *J));
+if (CandidatePairsSet.count(ValuePair(UI, UJ))) {
+VPPair VP(P, ValuePair(UI, UJ));
 ConnectedPairs[VP.first].push_back(VP.second);
 PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionDirect));
 }

 // Look for <J, I>:
-if (CandidatePairsSet.count(ValuePair(*J, *I))) {
-VPPair VP(P, ValuePair(*J, *I));
+if (CandidatePairsSet.count(ValuePair(UJ, UI))) {
+VPPair VP(P, ValuePair(UJ, UI));
 ConnectedPairs[VP.first].push_back(VP.second);
 PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSwap));
 }

@@ -1357,13 +1358,14 @@ namespace {
 if (Config.SplatBreaksChain) continue;
 // Look for cases where just the first value in the pair is used by
 // both members of another pair (splatting).
-for (Value::use_iterator J = P.first->use_begin(); J != E; ++J) {
-if ((SJ = dyn_cast<StoreInst>(*J)) &&
+for (Value::user_iterator J = P.first->user_begin(); J != E; ++J) {
+User *UJ = *J;
+if ((SJ = dyn_cast<StoreInst>(UJ)) &&
 P.first == SJ->getPointerOperand())
 continue;

-if (CandidatePairsSet.count(ValuePair(*I, *J))) {
-VPPair VP(P, ValuePair(*I, *J));
+if (CandidatePairsSet.count(ValuePair(UI, UJ))) {
+VPPair VP(P, ValuePair(UI, UJ));
 ConnectedPairs[VP.first].push_back(VP.second);
 PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSplat));
 }

@@ -1373,21 +1375,24 @@ namespace {
 if (Config.SplatBreaksChain) return;
 // Look for cases where just the second value in the pair is used by
 // both members of another pair (splatting).
-for (Value::use_iterator I = P.second->use_begin(),
-E = P.second->use_end(); I != E; ++I) {
-if (isa<LoadInst>(*I))
+for (Value::user_iterator I = P.second->user_begin(),
+E = P.second->user_end();
+I != E; ++I) {
+User *UI = *I;
+if (isa<LoadInst>(UI))
 continue;
-else if ((SI = dyn_cast<StoreInst>(*I)) &&
+else if ((SI = dyn_cast<StoreInst>(UI)) &&
 P.second == SI->getPointerOperand())
 continue;

-for (Value::use_iterator J = P.second->use_begin(); J != E; ++J) {
-if ((SJ = dyn_cast<StoreInst>(*J)) &&
+for (Value::user_iterator J = P.second->user_begin(); J != E; ++J) {
+User *UJ = *J;
+if ((SJ = dyn_cast<StoreInst>(UJ)) &&
 P.second == SJ->getPointerOperand())
 continue;

-if (CandidatePairsSet.count(ValuePair(*I, *J))) {
-VPPair VP(P, ValuePair(*I, *J));
+if (CandidatePairsSet.count(ValuePair(UI, UJ))) {
+VPPair VP(P, ValuePair(UI, UJ));
 ConnectedPairs[VP.first].push_back(VP.second);
 PairConnectionTypes.insert(VPPairWithType(VP, PairConnectionSplat));
 }

@@ -1947,16 +1952,15 @@ namespace {
 Type *VTy = getVecTypeForPair(Ty1, Ty2);

 bool NeedsExtraction = false;
-for (Value::use_iterator I = S->first->use_begin(),
-IE = S->first->use_end(); I != IE; ++I) {
-if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(*I)) {
+for (User *U : S->first->users()) {
+if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(U)) {
 // Shuffle can be folded if it has no other input
 if (isa<UndefValue>(SI->getOperand(1)))
 continue;
 }
-if (isa<ExtractElementInst>(*I))
+if (isa<ExtractElementInst>(U))
 continue;
-if (PrunedDAGInstrs.count(*I))
+if (PrunedDAGInstrs.count(U))
 continue;
 NeedsExtraction = true;
 break;

@@ -1979,16 +1983,15 @@ namespace {
 }

 NeedsExtraction = false;
-for (Value::use_iterator I = S->second->use_begin(),
-IE = S->second->use_end(); I != IE; ++I) {
-if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(*I)) {
+for (User *U : S->second->users()) {
+if (ShuffleVectorInst *SI = dyn_cast<ShuffleVectorInst>(U)) {
 // Shuffle can be folded if it has no other input
 if (isa<UndefValue>(SI->getOperand(1)))
 continue;
 }
-if (isa<ExtractElementInst>(*I))
+if (isa<ExtractElementInst>(U))
 continue;
-if (PrunedDAGInstrs.count(*I))
+if (PrunedDAGInstrs.count(U))
 continue;
 NeedsExtraction = true;
 break;
@@ -3344,12 +3344,11 @@ static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
 // instructions must not have external users.
 if (!Reductions.count(Inst))
 //Check that all of the users of the loop are inside the BB.
-for (Value::use_iterator I = Inst->use_begin(), E = Inst->use_end();
-I != E; ++I) {
-Instruction *U = cast<Instruction>(*I);
+for (User *U : Inst->users()) {
+Instruction *UI = cast<Instruction>(U);
 // This user may be a reduction exit value.
-if (!TheLoop->contains(U)) {
-DEBUG(dbgs() << "LV: Found an outside user for : " << *U << '\n');
+if (!TheLoop->contains(UI)) {
+DEBUG(dbgs() << "LV: Found an outside user for : " << *UI << '\n');
 return true;
 }
 }

@@ -3545,9 +3544,8 @@ static Value *stripGetElementPtr(Value *Ptr, ScalarEvolution *SE,
 ///\brief Look for a cast use of the passed value.
 static Value *getUniqueCastUse(Value *Ptr, Loop *Lp, Type *Ty) {
 Value *UniqueCast = 0;
-for (Value::use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end(); UI != UE;
-++UI) {
-CastInst *CI = dyn_cast<CastInst>(*UI);
+for (User *U : Ptr->users()) {
+CastInst *CI = dyn_cast<CastInst>(U);
 if (CI && CI->getType() == Ty) {
 if (!UniqueCast)
 UniqueCast = CI;

@@ -4714,12 +4712,11 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
 // nodes once we get to them.
 SmallVector<Instruction *, 8> NonPHIs;
 SmallVector<Instruction *, 8> PHIs;
-for (Value::use_iterator UI = Cur->use_begin(), E = Cur->use_end(); UI != E;
-++UI) {
-Instruction *Usr = cast<Instruction>(*UI);
+for (User *U : Cur->users()) {
+Instruction *UI = cast<Instruction>(U);

 // Check if we found the exit user.
-BasicBlock *Parent = Usr->getParent();
+BasicBlock *Parent = UI->getParent();
 if (!TheLoop->contains(Parent)) {
 // Exit if you find multiple outside users or if the header phi node is
 // being used. In this case the user uses the value of the previous

@@ -4742,20 +4739,20 @@ bool LoopVectorizationLegality::AddReductionVar(PHINode *Phi,
 // value must only be used once, except by phi nodes and min/max
 // reductions which are represented as a cmp followed by a select.
 ReductionInstDesc IgnoredVal(false, 0);
-if (VisitedInsts.insert(Usr)) {
-if (isa<PHINode>(Usr))
-PHIs.push_back(Usr);
+if (VisitedInsts.insert(UI)) {
+if (isa<PHINode>(UI))
+PHIs.push_back(UI);
 else
-NonPHIs.push_back(Usr);
-} else if (!isa<PHINode>(Usr) &&
-((!isa<FCmpInst>(Usr) &&
-!isa<ICmpInst>(Usr) &&
-!isa<SelectInst>(Usr)) ||
-!isMinMaxSelectCmpPattern(Usr, IgnoredVal).IsReduction))
+NonPHIs.push_back(UI);
+} else if (!isa<PHINode>(UI) &&
+((!isa<FCmpInst>(UI) &&
+!isa<ICmpInst>(UI) &&
+!isa<SelectInst>(UI)) ||
+!isMinMaxSelectCmpPattern(UI, IgnoredVal).IsReduction))
 return false;

 // Remember that we completed the cycle.
-if (Usr == Phi)
+if (UI == Phi)
 FoundStartPHI = true;
 }
 Worklist.append(PHIs.begin(), PHIs.end());

@@ -4801,7 +4798,7 @@ LoopVectorizationLegality::isMinMaxSelectCmpPattern(Instruction *I,
 // We must handle the select(cmp()) as a single instruction. Advance to the
 // select.
 if ((Cmp = dyn_cast<ICmpInst>(I)) || (Cmp = dyn_cast<FCmpInst>(I))) {
-if (!Cmp->hasOneUse() || !(Select = dyn_cast<SelectInst>(*I->use_begin())))
+if (!Cmp->hasOneUse() || !(Select = dyn_cast<SelectInst>(*I->user_begin())))
 return ReductionInstDesc(false, I);
 return ReductionInstDesc(Select, Prev.MinMaxKind);
 }
@@ -561,19 +561,18 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
 if (Entry->NeedToGather)
 continue;

-for (Value::use_iterator User = Scalar->use_begin(),
-UE = Scalar->use_end(); User != UE; ++User) {
-DEBUG(dbgs() << "SLP: Checking user:" << **User << ".\n");
+for (User *U : Scalar->users()) {
+DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

 // Skip in-tree scalars that become vectors.
-if (ScalarToTreeEntry.count(*User)) {
+if (ScalarToTreeEntry.count(U)) {
 DEBUG(dbgs() << "SLP: \tInternal user will be removed:" <<
-**User << ".\n");
-int Idx = ScalarToTreeEntry[*User]; (void) Idx;
+*U << ".\n");
+int Idx = ScalarToTreeEntry[U]; (void) Idx;
 assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
 continue;
 }
-Instruction *UserInst = dyn_cast<Instruction>(*User);
+Instruction *UserInst = dyn_cast<Instruction>(U);
 if (!UserInst)
 continue;

@@ -581,9 +580,9 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots, ValueSet *Rdx) {
 if (Rdx && std::find(Rdx->begin(), Rdx->end(), UserInst) != Rdx->end())
 continue;

-DEBUG(dbgs() << "SLP: Need to extract:" << **User << " from lane " <<
+DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
 Lane << " from " << *Scalar << ".\n");
-ExternalUses.push_back(ExternalUser(Scalar, *User, Lane));
+ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
 }
 }
 }

@@ -670,57 +669,56 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
 for (unsigned i = 0, e = VL.size(); i != e; ++i) {
 Instruction *Scalar = cast<Instruction>(VL[i]);
 DEBUG(dbgs() << "SLP: Checking users of " << *Scalar << ". \n");
-for (Value::use_iterator U = Scalar->use_begin(), UE = Scalar->use_end();
-U != UE; ++U) {
-DEBUG(dbgs() << "SLP: \tUser " << **U << ". \n");
-Instruction *User = dyn_cast<Instruction>(*U);
-if (!User) {
+for (User *U : Scalar->users()) {
+DEBUG(dbgs() << "SLP: \tUser " << *U << ". \n");
+Instruction *UI = dyn_cast<Instruction>(U);
+if (!UI) {
 DEBUG(dbgs() << "SLP: Gathering due unknown user. \n");
 newTreeEntry(VL, false);
 return;
 }

 // We don't care if the user is in a different basic block.
-BasicBlock *UserBlock = User->getParent();
+BasicBlock *UserBlock = UI->getParent();
 if (UserBlock != BB) {
 DEBUG(dbgs() << "SLP: User from a different basic block "
-<< *User << ". \n");
+<< *UI << ". \n");
 continue;
 }

 // If this is a PHINode within this basic block then we can place the
 // extract wherever we want.
-if (isa<PHINode>(*User)) {
-DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *User << ". \n");
+if (isa<PHINode>(*UI)) {
+DEBUG(dbgs() << "SLP: \tWe can schedule PHIs:" << *UI << ". \n");
 continue;
 }

 // Check if this is a safe in-tree user.
-if (ScalarToTreeEntry.count(User)) {
-int Idx = ScalarToTreeEntry[User];
+if (ScalarToTreeEntry.count(UI)) {
+int Idx = ScalarToTreeEntry[UI];
 int VecLocation = VectorizableTree[Idx].LastScalarIndex;
 if (VecLocation <= MyLastIndex) {
 DEBUG(dbgs() << "SLP: Gathering due to unschedulable vector. \n");
 newTreeEntry(VL, false);
 return;
 }
-DEBUG(dbgs() << "SLP: In-tree user (" << *User << ") at #" <<
+DEBUG(dbgs() << "SLP: In-tree user (" << *UI << ") at #" <<
 VecLocation << " vector value (" << *Scalar << ") at #"
 << MyLastIndex << ".\n");
 continue;
 }

 // This user is part of the reduction.
-if (RdxOps && RdxOps->count(User))
+if (RdxOps && RdxOps->count(UI))
 continue;

 // Make sure that we can schedule this unknown user.
 BlockNumbering &BN = BlocksNumbers[BB];
-int UserIndex = BN.getIndex(User);
+int UserIndex = BN.getIndex(UI);
 if (UserIndex < MyLastIndex) {

 DEBUG(dbgs() << "SLP: Can't schedule extractelement for "
-<< *User << ". \n");
+<< *UI << ". \n");
 newTreeEntry(VL, false);
 return;
 }

@@ -739,11 +737,10 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
 // Check that instructions in this bundle don't reference other instructions.
 // The runtime of this check is O(N * N-1 * uses(N)) and a typical N is 4.
 for (unsigned i = 0, e = VL.size(); i < e; ++i) {
-for (Value::use_iterator U = VL[i]->use_begin(), UE = VL[i]->use_end();
-U != UE; ++U) {
+for (User *U : VL[i]->users()) {
 for (unsigned j = 0; j < e; ++j) {
-if (i != j && *U == VL[j]) {
-DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << **U << ". \n");
+if (i != j && U == VL[j]) {
+DEBUG(dbgs() << "SLP: Intra-bundle dependencies!" << *U << ". \n");
 newTreeEntry(VL, false);
 return;
 }

@@ -1595,8 +1592,8 @@ Value *BoUpSLP::vectorizeTree() {

 // Skip users that we already RAUW. This happens when one instruction
 // has multiple uses of the same value.
-if (std::find(Scalar->use_begin(), Scalar->use_end(), User) ==
-Scalar->use_end())
+if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
+Scalar->user_end())
 continue;
 assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");

@@ -1657,13 +1654,12 @@ Value *BoUpSLP::vectorizeTree() {

 Type *Ty = Scalar->getType();
 if (!Ty->isVoidTy()) {
-for (Value::use_iterator User = Scalar->use_begin(),
-UE = Scalar->use_end(); User != UE; ++User) {
-DEBUG(dbgs() << "SLP: \tvalidating user:" << **User << ".\n");
+for (User *U : Scalar->users()) {
+DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");

-assert((ScalarToTreeEntry.count(*User) ||
+assert((ScalarToTreeEntry.count(U) ||
 // It is legal to replace the reduction users by undef.
-(RdxOps && RdxOps->count(*User))) &&
+(RdxOps && RdxOps->count(U))) &&
 "Replacing out-of-tree value with undef");
 }
 Value *Undef = UndefValue::get(Ty);

@@ -2466,7 +2462,7 @@ static bool findBuildVector(InsertElementInst *IE,
 if (IE->use_empty())
 return false;

-InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->use_back());
+InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
 if (!NextUse)
 return true;
@@ -37,12 +37,11 @@ namespace {
 if (!I->isDeclaration()) continue;

 bool PrintedFn = false;
-for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
-UI != E; ++UI) {
-Instruction *User = dyn_cast<Instruction>(*UI);
-if (!User) continue;
+for (User *U : I->users()) {
+Instruction *UI = dyn_cast<Instruction>(U);
+if (!UI) continue;

-CallSite CS(cast<Value>(User));
+CallSite CS(cast<Value>(UI));
 if (!CS) continue;

 for (CallSite::arg_iterator AI = CS.arg_begin(),

@@ -53,7 +52,7 @@ namespace {
 errs() << "Function '" << I->getName() << "':\n";
 PrintedFn = true;
 }
-errs() << *User;
+errs() << *UI;
 break;
 }
 }