Replace SmallPtrSet with SmallPtrSetImpl in function arguments to avoid needing to mention the size.
llvm-svn: 215868
parent 3a1623f5e4
commit 5229cfd163
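The change applied throughout the diff below is mechanical: function parameters (plus the matching forward declaration and iterator uses) switch from a concrete SmallPtrSet<T, N> to the size-erased base class SmallPtrSetImpl<T>, which SmallPtrSet<T, N> derives from, so the caller's inline element count N no longer appears in the callee's signature. A minimal sketch of the idea (not code from this commit; the helper name noteVisited is made up for illustration):

#include "llvm/ADT/SmallPtrSet.h"

// The parameter is the size-erased base class, so any SmallPtrSet<int *, N>
// binds to it regardless of N.
static void noteVisited(int *P, llvm::SmallPtrSetImpl<int *> &Visited) {
  Visited.insert(P); // insert/count/erase all live on SmallPtrSetImpl
}

void example() {
  llvm::SmallPtrSet<int *, 4> Small;  // one caller picks a small inline buffer
  llvm::SmallPtrSet<int *, 64> Large; // another caller picks a large one
  int X = 0;
  noteVisited(&X, Small);             // the same callee serves both
  noteVisited(&X, Large);
}

This mirrors the existing convention of passing SmallVectorImpl<T>& instead of SmallVector<T, N>& for vector arguments.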
@@ -174,7 +174,7 @@ public:
  /// dump - This method is used for debugging.
  void dump() const;
  protected:
- bool AddUsersImpl(Instruction *I, SmallPtrSet<Loop*,16> &SimpleLoopNests);
+ bool AddUsersImpl(Instruction *I, SmallPtrSetImpl<Loop*> &SimpleLoopNests);
  };

  Pass *createIVUsersPass();
@@ -163,7 +163,7 @@ public:
  /// which have machine instructions that belong to lexical scope identified by
  /// DebugLoc.
  void getMachineBasicBlocks(DebugLoc DL,
- SmallPtrSet<const MachineBasicBlock *, 4> &MBBs);
+ SmallPtrSetImpl<const MachineBasicBlock *> &MBBs);

  /// dominates - Return true if DebugLoc's lexical scope dominates at least one
  /// machine instruction's lexical scope in a given machine basic block.
@@ -594,7 +594,7 @@ public:
  /// changes.
  /// NOTE: This is still very expensive. Use carefully.
  bool hasPredecessorHelper(const SDNode *N,
- SmallPtrSet<const SDNode *, 32> &Visited,
+ SmallPtrSetImpl<const SDNode *> &Visited,
  SmallVectorImpl<const SDNode *> &Worklist) const;

  /// getNumOperands - Return the number of values used by this operation.
@@ -249,7 +249,7 @@ public:
  bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }

  /// isSized - Return true if this is a sized type.
- bool isSized(SmallPtrSet<const Type*, 4> *Visited = nullptr) const;
+ bool isSized(SmallPtrSetImpl<const Type*> *Visited = nullptr) const;

  /// hasName - Return true if this is a named struct that has a non-empty name.
  bool hasName() const { return SymbolTableEntry != nullptr; }
@@ -265,7 +265,7 @@ public:
  /// get the actual size for a particular target, it is reasonable to use the
  /// DataLayout subsystem to do this.
  ///
- bool isSized(SmallPtrSet<const Type*, 4> *Visited = nullptr) const {
+ bool isSized(SmallPtrSetImpl<const Type*> *Visited = nullptr) const {
  // If it's a primitive, it is always sized.
  if (getTypeID() == IntegerTyID || isFloatingPointTy() ||
  getTypeID() == PointerTyID ||
@@ -419,7 +419,7 @@ private:
  /// isSizedDerivedType - Derived types like structures and arrays are sized
  /// iff all of the members of the type are sized as well. Since asking for
  /// their size is relatively uncommon, move this operation out of line.
- bool isSizedDerivedType(SmallPtrSet<const Type*, 4> *Visited = nullptr) const;
+ bool isSizedDerivedType(SmallPtrSetImpl<const Type*> *Visited = nullptr) const;
  };

  // Printing of types.
@@ -124,7 +124,7 @@ private:
  void applyScopeRestrictions();
  void applyRestriction(GlobalValue &GV, const ArrayRef<StringRef> &Libcalls,
  std::vector<const char *> &MustPreserveList,
- SmallPtrSet<GlobalValue *, 8> &AsmUsed,
+ SmallPtrSetImpl<GlobalValue *> &AsmUsed,
  Mangler &Mangler);
  bool determineTarget(std::string &errMsg);

@@ -20,7 +20,7 @@ class Module;
  class Function;
  class GlobalValue;
  class GlobalVariable;
- template <class PtrType, unsigned SmallSize> class SmallPtrSet;
+ template <class PtrType> class SmallPtrSetImpl;

  /// Append F to the list of global ctors of module M with the given Priority.
  /// This wraps the function in the appropriate structure and stores it along
@@ -34,7 +34,7 @@ void appendToGlobalDtors(Module &M, Function *F, int Priority);
  /// \brief Given "llvm.used" or "llvm.compiler.used" as a global name, collect
  /// the initializer elements of that global in Set and return the global itself.
  GlobalVariable *collectUsedGlobalVariables(Module &M,
- SmallPtrSet<GlobalValue *, 8> &Set,
+ SmallPtrSetImpl<GlobalValue *> &Set,
  bool CompilerUsed);
  } // End llvm namespace

@@ -967,7 +967,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
  static Constant *
  ConstantFoldConstantExpressionImpl(const ConstantExpr *CE, const DataLayout *TD,
  const TargetLibraryInfo *TLI,
- SmallPtrSet<ConstantExpr *, 4> &FoldedOps) {
+ SmallPtrSetImpl<ConstantExpr *> &FoldedOps) {
  SmallVector<Constant *, 8> Ops;
  for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); i != e;
  ++i) {
@@ -84,7 +84,7 @@ static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
  /// form.
  static bool isSimplifiedLoopNest(BasicBlock *BB, const DominatorTree *DT,
  const LoopInfo *LI,
- SmallPtrSet<Loop*,16> &SimpleLoopNests) {
+ SmallPtrSetImpl<Loop*> &SimpleLoopNests) {
  Loop *NearestLoop = nullptr;
  for (DomTreeNode *Rung = DT->getNode(BB);
  Rung; Rung = Rung->getIDom()) {
@@ -112,7 +112,7 @@ static bool isSimplifiedLoopNest(BasicBlock *BB, const DominatorTree *DT,
  /// reducible SCEV, recursively add its users to the IVUsesByStride set and
  /// return true. Otherwise, return false.
  bool IVUsers::AddUsersImpl(Instruction *I,
- SmallPtrSet<Loop*,16> &SimpleLoopNests) {
+ SmallPtrSetImpl<Loop*> &SimpleLoopNests) {
  // Add this IV user to the Processed set before returning false to ensure that
  // all IV users are members of the set. See IVUsers::isIVUserOrOperand.
  if (!Processed.insert(I))
@@ -96,7 +96,7 @@ namespace {

  Value *findValue(Value *V, bool OffsetOk) const;
  Value *findValueImpl(Value *V, bool OffsetOk,
- SmallPtrSet<Value *, 4> &Visited) const;
+ SmallPtrSetImpl<Value *> &Visited) const;

  public:
  Module *Mod;
@@ -622,7 +622,7 @@ Value *Lint::findValue(Value *V, bool OffsetOk) const {

  /// findValueImpl - Implementation helper for findValue.
  Value *Lint::findValueImpl(Value *V, bool OffsetOk,
- SmallPtrSet<Value *, 4> &Visited) const {
+ SmallPtrSetImpl<Value *> &Visited) const {
  // Detect self-referential values.
  if (!Visited.insert(V))
  return UndefValue::get(V->getType());
@@ -1833,7 +1833,7 @@ bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,

  /// GetStringLengthH - If we can compute the length of the string pointed to by
  /// the specified pointer, return 'len+1'. If we can't, return 0.
- static uint64_t GetStringLengthH(Value *V, SmallPtrSet<PHINode*, 32> &PHIs) {
+ static uint64_t GetStringLengthH(Value *V, SmallPtrSetImpl<PHINode*> &PHIs) {
  // Look through noop bitcast instructions.
  V = V->stripPointerCasts();

@@ -1149,7 +1149,7 @@ bool DwarfDebug::addCurrentFnArgument(DbgVariable *Var, LexicalScope *Scope) {

  // Collect variable information from side table maintained by MMI.
  void DwarfDebug::collectVariableInfoFromMMITable(
- SmallPtrSet<const MDNode *, 16> &Processed) {
+ SmallPtrSetImpl<const MDNode *> &Processed) {
  for (const auto &VI : MMI->getVariableDbgInfo()) {
  if (!VI.Var)
  continue;
@@ -1308,7 +1308,7 @@ DwarfDebug::buildLocationList(SmallVectorImpl<DebugLocEntry> &DebugLoc,

  // Find variables for each lexical scope.
  void
- DwarfDebug::collectVariableInfo(SmallPtrSet<const MDNode *, 16> &Processed) {
+ DwarfDebug::collectVariableInfo(SmallPtrSetImpl<const MDNode *> &Processed) {
  LexicalScope *FnScope = LScopes.getCurrentFunctionScope();
  DwarfCompileUnit *TheCU = SPMap.lookup(FnScope->getScopeNode());

@@ -533,7 +533,7 @@ class DwarfDebug : public AsmPrinterHandler {
  bool addCurrentFnArgument(DbgVariable *Var, LexicalScope *Scope);

  /// \brief Populate LexicalScope entries with variables' info.
- void collectVariableInfo(SmallPtrSet<const MDNode *, 16> &ProcessedVars);
+ void collectVariableInfo(SmallPtrSetImpl<const MDNode *> &ProcessedVars);

  /// \brief Build the location list for all DBG_VALUEs in the
  /// function that describe the same variable.
@@ -542,7 +542,7 @@ class DwarfDebug : public AsmPrinterHandler {

  /// \brief Collect variable information from the side table maintained
  /// by MMI.
- void collectVariableInfoFromMMITable(SmallPtrSet<const MDNode *, 16> &P);
+ void collectVariableInfoFromMMITable(SmallPtrSetImpl<const MDNode *> &P);

  /// \brief Ensure that a label will be emitted before MI.
  void requestLabelBeforeInsn(const MachineInstr *MI) {
@@ -2297,7 +2297,7 @@ static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
  /// Add the ultimately found memory instructions to MemoryUses.
  static bool FindAllMemoryUses(Instruction *I,
  SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
- SmallPtrSet<Instruction*, 16> &ConsideredInsts,
+ SmallPtrSetImpl<Instruction*> &ConsideredInsts,
  const TargetLowering &TLI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I))
@@ -285,7 +285,7 @@ void LexicalScopes::assignInstructionRanges(
  /// have machine instructions that belong to lexical scope identified by
  /// DebugLoc.
  void LexicalScopes::getMachineBasicBlocks(
- DebugLoc DL, SmallPtrSet<const MachineBasicBlock *, 4> &MBBs) {
+ DebugLoc DL, SmallPtrSetImpl<const MachineBasicBlock *> &MBBs) {
  MBBs.clear();
  LexicalScope *Scope = getOrCreateLexicalScope(DL);
  if (!Scope)
@@ -1439,7 +1439,7 @@ public:
  /// Add erased instructions to ErasedInstrs.
  /// Add foreign virtual registers to ShrinkRegs if their live range ended at
  /// the erased instrs.
- void eraseInstrs(SmallPtrSet<MachineInstr*, 8> &ErasedInstrs,
+ void eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
  SmallVectorImpl<unsigned> &ShrinkRegs);

  /// Get the value assignments suitable for passing to LiveInterval::join.
@@ -1952,7 +1952,7 @@ void JoinVals::pruneValues(JoinVals &Other,
  }
  }

- void JoinVals::eraseInstrs(SmallPtrSet<MachineInstr*, 8> &ErasedInstrs,
+ void JoinVals::eraseInstrs(SmallPtrSetImpl<MachineInstr*> &ErasedInstrs,
  SmallVectorImpl<unsigned> &ShrinkRegs) {
  for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) {
  // Get the def location before markUnused() below invalidates it.
@@ -575,7 +575,7 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
  static unsigned
  iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
  SUnit *SUa, SUnit *SUb, SUnit *ExitSU, unsigned *Depth,
- SmallPtrSet<const SUnit*, 16> &Visited) {
+ SmallPtrSetImpl<const SUnit*> &Visited) {
  if (!SUa || !SUb || SUb == ExitSU)
  return *Depth;

@@ -6398,7 +6398,7 @@ bool SDNode::hasPredecessor(const SDNode *N) const {

  bool
  SDNode::hasPredecessorHelper(const SDNode *N,
- SmallPtrSet<const SDNode *, 32> &Visited,
+ SmallPtrSetImpl<const SDNode *> &Visited,
  SmallVectorImpl<const SDNode *> &Worklist) const {
  if (Visited.empty()) {
  Worklist.push_back(this);
@@ -6776,8 +6776,8 @@ bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {

  #ifndef NDEBUG
  static void checkForCyclesHelper(const SDNode *N,
- SmallPtrSet<const SDNode*, 32> &Visited,
- SmallPtrSet<const SDNode*, 32> &Checked,
+ SmallPtrSetImpl<const SDNode*> &Visited,
+ SmallPtrSetImpl<const SDNode*> &Checked,
  const llvm::SelectionDAG *DAG) {
  // If this node has already been checked, don't check it again.
  if (Checked.count(N))
@@ -1731,7 +1731,7 @@ static SDNode *findGlueUse(SDNode *N) {
  /// This function recursively traverses up the operand chain, ignoring
  /// certain nodes.
  static bool findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
- SDNode *Root, SmallPtrSet<SDNode*, 16> &Visited,
+ SDNode *Root, SmallPtrSetImpl<SDNode*> &Visited,
  bool IgnoreChains) {
  // The NodeID's are given uniques ID's where a node ID is guaranteed to be
  // greater than all of its (recursive) operands. If we scan to a point where
@@ -139,7 +139,7 @@ void SjLjEHPrepare::insertCallSiteStore(Instruction *I, int Number) {
  /// MarkBlocksLiveIn - Insert BB and all of its predescessors into LiveBBs until
  /// we reach blocks we've already seen.
  static void MarkBlocksLiveIn(BasicBlock *BB,
- SmallPtrSet<BasicBlock *, 64> &LiveBBs) {
+ SmallPtrSetImpl<BasicBlock *> &LiveBBs) {
  if (!LiveBBs.insert(BB))
  return; // already been here.

@@ -283,7 +283,7 @@ void Constant::destroyConstantImpl() {
  }

  static bool canTrapImpl(const Constant *C,
- SmallPtrSet<const ConstantExpr *, 4> &NonTrappingOps) {
+ SmallPtrSetImpl<const ConstantExpr *> &NonTrappingOps) {
  assert(C->getType()->isFirstClassType() && "Cannot evaluate aggregate vals!");
  // The only thing that could possibly trap are constant exprs.
  const ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
@@ -155,7 +155,7 @@ int Type::getFPMantissaWidth() const {
  /// isSizedDerivedType - Derived types like structures and arrays are sized
  /// iff all of the members of the type are sized as well. Since asking for
  /// their size is relatively uncommon, move this operation out of line.
- bool Type::isSizedDerivedType(SmallPtrSet<const Type*, 4> *Visited) const {
+ bool Type::isSizedDerivedType(SmallPtrSetImpl<const Type*> *Visited) const {
  if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
  return ATy->getElementType()->isSized(Visited);

@@ -554,7 +554,7 @@ StructType *StructType::create(StringRef Name, Type *type, ...) {
  return Ret;
  }

- bool StructType::isSized(SmallPtrSet<const Type*, 4> *Visited) const {
+ bool StructType::isSized(SmallPtrSetImpl<const Type*> *Visited) const {
  if ((getSubclassData() & SCDB_IsSized) != 0)
  return true;
  if (isOpaque())
@@ -303,7 +303,7 @@ void Value::takeName(Value *V) {
  }

  #ifndef NDEBUG
- static bool contains(SmallPtrSet<ConstantExpr *, 4> &Cache, ConstantExpr *Expr,
+ static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
  Constant *C) {
  if (!Cache.insert(Expr))
  return false;
@@ -476,7 +476,7 @@ Value *Value::stripInBoundsOffsets() {
  /// isDereferenceablePointer - Test if this value is always a pointer to
  /// allocated and suitably aligned memory for a simple load or store.
  static bool isDereferenceablePointer(const Value *V, const DataLayout *DL,
- SmallPtrSet<const Value *, 32> &Visited) {
+ SmallPtrSetImpl<const Value *> &Visited) {
  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

@@ -257,7 +257,7 @@ private:
  void visitGlobalVariable(const GlobalVariable &GV);
  void visitGlobalAlias(const GlobalAlias &GA);
  void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
- void visitAliaseeSubExpr(SmallPtrSet<const GlobalAlias *, 4> &Visited,
+ void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
  const GlobalAlias &A, const Constant &C);
  void visitNamedMDNode(const NamedMDNode &NMD);
  void visitMDNode(MDNode &MD, Function *F);
@@ -502,7 +502,7 @@ void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
  visitAliaseeSubExpr(Visited, GA, C);
  }

- void Verifier::visitAliaseeSubExpr(SmallPtrSet<const GlobalAlias *, 4> &Visited,
+ void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
  const GlobalAlias &GA, const Constant &C) {
  if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
  Assert1(!GV->isDeclaration(), "Alias must point to a definition", &GA);
@@ -314,7 +314,7 @@ void LTOCodeGenerator::
  applyRestriction(GlobalValue &GV,
  const ArrayRef<StringRef> &Libcalls,
  std::vector<const char*> &MustPreserveList,
- SmallPtrSet<GlobalValue*, 8> &AsmUsed,
+ SmallPtrSetImpl<GlobalValue*> &AsmUsed,
  Mangler &Mangler) {
  // There are no restrictions to apply to declarations.
  if (GV.isDeclaration())
@@ -343,7 +343,7 @@ applyRestriction(GlobalValue &GV,
  }

  static void findUsedValues(GlobalVariable *LLVMUsed,
- SmallPtrSet<GlobalValue*, 8> &UsedValues) {
+ SmallPtrSetImpl<GlobalValue*> &UsedValues) {
  if (!LLVMUsed) return;

  ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());
@@ -1714,7 +1714,7 @@ bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator E,
- SmallPtrSet<MachineInstr*, 4> &MemOps,
+ SmallPtrSetImpl<MachineInstr*> &MemOps,
  SmallSet<unsigned, 4> &MemRegs,
  const TargetRegisterInfo *TRI) {
  // Are there stores / loads / calls between them?
@@ -81,8 +81,8 @@ namespace {
  CallGraphNode *PromoteArguments(CallGraphNode *CGN);
  bool isSafeToPromoteArgument(Argument *Arg, bool isByVal) const;
  CallGraphNode *DoPromotion(Function *F,
- SmallPtrSet<Argument*, 8> &ArgsToPromote,
- SmallPtrSet<Argument*, 8> &ByValArgsToTransform);
+ SmallPtrSetImpl<Argument*> &ArgsToPromote,
+ SmallPtrSetImpl<Argument*> &ByValArgsToTransform);

  using llvm::Pass::doInitialization;
  bool doInitialization(CallGraph &CG) override;
@@ -495,8 +495,8 @@ bool ArgPromotion::isSafeToPromoteArgument(Argument *Arg,
  /// arguments, and returns the new function. At this point, we know that it's
  /// safe to do so.
  CallGraphNode *ArgPromotion::DoPromotion(Function *F,
- SmallPtrSet<Argument*, 8> &ArgsToPromote,
- SmallPtrSet<Argument*, 8> &ByValArgsToTransform) {
+ SmallPtrSetImpl<Argument*> &ArgsToPromote,
+ SmallPtrSetImpl<Argument*> &ByValArgsToTransform) {

  // Start by computing a new prototype for the function, which is the same as
  // the old function, but has modified arguments.
@@ -66,7 +66,7 @@ ModulePass *llvm::createConstantMergePass() { return new ConstantMerge(); }

  /// Find values that are marked as llvm.used.
  static void FindUsedValues(GlobalVariable *LLVMUsed,
- SmallPtrSet<const GlobalValue*, 8> &UsedValues) {
+ SmallPtrSetImpl<const GlobalValue*> &UsedValues) {
  if (!LLVMUsed) return;
  ConstantArray *Inits = cast<ConstantArray>(LLVMUsed->getInitializer());

@@ -612,7 +612,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  /// value will trap if the value is dynamically null. PHIs keeps track of any
  /// phi nodes we've seen to avoid reprocessing them.
  static bool AllUsesOfValueWillTrapIfNull(const Value *V,
- SmallPtrSet<const PHINode*, 8> &PHIs) {
+ SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users())
  if (isa<LoadInst>(U)) {
  // Will trap.
@@ -957,7 +957,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
  /// it is to the specified global.
  static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
  const GlobalVariable *GV,
- SmallPtrSet<const PHINode*, 8> &PHIs) {
+ SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
  const Instruction *Inst = cast<Instruction>(U);

@@ -1047,8 +1047,8 @@ static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
  /// of a load) are simple enough to perform heap SRA on. This permits GEP's
  /// that index through the array and struct field, icmps of null, and PHIs.
  static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
- SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
- SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
+ SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
+ SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (const User *U : V->users()) {
@@ -1975,7 +1975,7 @@ bool GlobalOpt::OptimizeGlobalVars(Module &M) {

  static inline bool
  isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSet<Constant*, 8> &SimpleConstants,
+ SmallPtrSetImpl<Constant*> &SimpleConstants,
  const DataLayout *DL);


@@ -1988,7 +1988,7 @@ isSimpleEnoughValueToCommit(Constant *C,
  /// in SimpleConstants to avoid having to rescan the same constants all the
  /// time.
  static bool isSimpleEnoughValueToCommitHelper(Constant *C,
- SmallPtrSet<Constant*, 8> &SimpleConstants,
+ SmallPtrSetImpl<Constant*> &SimpleConstants,
  const DataLayout *DL) {
  // Simple global addresses are supported, do not allow dllimport or
  // thread-local globals.
@@ -2046,7 +2046,7 @@ static bool isSimpleEnoughValueToCommitHelper(Constant *C,

  static inline bool
  isSimpleEnoughValueToCommit(Constant *C,
- SmallPtrSet<Constant*, 8> &SimpleConstants,
+ SmallPtrSetImpl<Constant*> &SimpleConstants,
  const DataLayout *DL) {
  // If we already checked this constant, we win.
  if (!SimpleConstants.insert(C)) return true;
@@ -2217,7 +2217,7 @@ public:
  return MutatedMemory;
  }

- const SmallPtrSet<GlobalVariable*, 8> &getInvariants() const {
+ const SmallPtrSetImpl<GlobalVariable*> &getInvariants() const {
  return Invariants;
  }

@@ -2725,7 +2725,7 @@ static int compareNames(Constant *const *A, Constant *const *B) {
  }

  static void setUsedInitializer(GlobalVariable &V,
- SmallPtrSet<GlobalValue *, 8> Init) {
+ SmallPtrSetImpl<GlobalValue *> Init) {
  if (Init.empty()) {
  V.eraseFromParent();
  return;
@@ -191,7 +191,7 @@ static void StripTypeNames(Module &M, bool PreserveDbgInfo) {

  /// Find values that are marked as llvm.used.
  static void findUsedValues(GlobalVariable *LLVMUsed,
- SmallPtrSet<const GlobalValue*, 8> &UsedValues) {
+ SmallPtrSetImpl<const GlobalValue*> &UsedValues) {
  if (!LLVMUsed) return;
  UsedValues.insert(LLVMUsed);

@@ -506,7 +506,7 @@ Instruction *InstCombiner::FoldPHIArgOpIntoPHI(PHINode &PN) {
  /// DeadPHICycle - Return true if this PHI node is only used by a PHI node cycle
  /// that is dead.
  static bool DeadPHICycle(PHINode *PN,
- SmallPtrSet<PHINode*, 16> &PotentiallyDeadPHIs) {
+ SmallPtrSetImpl<PHINode*> &PotentiallyDeadPHIs) {
  if (PN->use_empty()) return true;
  if (!PN->hasOneUse()) return false;

@@ -528,7 +528,7 @@ static bool DeadPHICycle(PHINode *PN,
  /// NonPhiInVal. This happens with mutually cyclic phi nodes like:
  /// z = some value; x = phi (y, z); y = phi (x, z)
  static bool PHIsEqualValue(PHINode *PN, Value *NonPhiInVal,
- SmallPtrSet<PHINode*, 16> &ValueEqualPHIs) {
+ SmallPtrSetImpl<PHINode*> &ValueEqualPHIs) {
  // See if we already saw this PHI node.
  if (!ValueEqualPHIs.insert(PN))
  return true;
@@ -2548,7 +2548,7 @@ static bool TryToSinkInstruction(Instruction *I, BasicBlock *DestBlock) {
  /// whose condition is a known constant, we only visit the reachable successors.
  ///
  static bool AddReachableCodeToWorklist(BasicBlock *BB,
- SmallPtrSet<BasicBlock*, 64> &Visited,
+ SmallPtrSetImpl<BasicBlock*> &Visited,
  InstCombiner &IC,
  const DataLayout *DL,
  const TargetLibraryInfo *TLI) {
@@ -206,8 +206,8 @@ void
  llvm::objcarc::FindDependencies(DependenceKind Flavor,
  const Value *Arg,
  BasicBlock *StartBB, Instruction *StartInst,
- SmallPtrSet<Instruction *, 4> &DependingInsts,
- SmallPtrSet<const BasicBlock *, 4> &Visited,
+ SmallPtrSetImpl<Instruction *> &DependingInsts,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
  ProvenanceAnalysis &PA) {
  BasicBlock::iterator StartPos = StartInst;

@@ -53,8 +53,8 @@ enum DependenceKind {
  void FindDependencies(DependenceKind Flavor,
  const Value *Arg,
  BasicBlock *StartBB, Instruction *StartInst,
- SmallPtrSet<Instruction *, 4> &DependingInstructions,
- SmallPtrSet<const BasicBlock *, 4> &Visited,
+ SmallPtrSetImpl<Instruction *> &DependingInstructions,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
  ProvenanceAnalysis &PA);

  bool
@@ -72,9 +72,9 @@ namespace {

  bool ContractAutorelease(Function &F, Instruction *Autorelease,
  InstructionClass Class,
- SmallPtrSet<Instruction *, 4>
+ SmallPtrSetImpl<Instruction *>
  &DependingInstructions,
- SmallPtrSet<const BasicBlock *, 4>
+ SmallPtrSetImpl<const BasicBlock *>
  &Visited);

  void ContractRelease(Instruction *Release,
@@ -150,9 +150,9 @@ ObjCARCContract::OptimizeRetainCall(Function &F, Instruction *Retain) {
  bool
  ObjCARCContract::ContractAutorelease(Function &F, Instruction *Autorelease,
  InstructionClass Class,
- SmallPtrSet<Instruction *, 4>
+ SmallPtrSetImpl<Instruction *>
  &DependingInstructions,
- SmallPtrSet<const BasicBlock *, 4>
+ SmallPtrSetImpl<const BasicBlock *>
  &Visited) {
  const Value *Arg = GetObjCArg(Autorelease);

@@ -2850,8 +2850,8 @@ bool ObjCARCOpt::OptimizeSequences(Function &F) {
  /// shared pointer argument. Note that Retain need not be in BB.
  static bool
  HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
- SmallPtrSet<Instruction *, 4> &DepInsts,
- SmallPtrSet<const BasicBlock *, 4> &Visited,
+ SmallPtrSetImpl<Instruction *> &DepInsts,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
  ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
  DepInsts, Visited, PA);
@@ -2879,8 +2879,8 @@ HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
  static CallInst *
  FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
  Instruction *Autorelease,
- SmallPtrSet<Instruction *, 4> &DepInsts,
- SmallPtrSet<const BasicBlock *, 4> &Visited,
+ SmallPtrSetImpl<Instruction *> &DepInsts,
+ SmallPtrSetImpl<const BasicBlock *> &Visited,
  ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg,
  BB, Autorelease, DepInsts, Visited, PA);
@@ -2906,8 +2906,8 @@ FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
  static CallInst *
  FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
  ReturnInst *Ret,
- SmallPtrSet<Instruction *, 4> &DepInsts,
- SmallPtrSet<const BasicBlock *, 4> &V,
+ SmallPtrSetImpl<Instruction *> &DepInsts,
+ SmallPtrSetImpl<const BasicBlock *> &V,
  ProvenanceAnalysis &PA) {
  FindDependencies(NeedsPositiveRetainCount, Arg,
  BB, Ret, DepInsts, V, PA);
@@ -1200,7 +1200,7 @@ void IndVarSimplify::SimplifyAndExtend(Loop *L,
  /// BackedgeTakenInfo. If these expressions have not been reduced, then
  /// expanding them may incur additional cost (albeit in the loop preheader).
  static bool isHighCostExpansion(const SCEV *S, BranchInst *BI,
- SmallPtrSet<const SCEV*, 8> &Processed,
+ SmallPtrSetImpl<const SCEV*> &Processed,
  ScalarEvolution *SE) {
  if (!Processed.insert(S))
  return false;
@@ -1373,7 +1373,7 @@ static bool needsLFTR(Loop *L, DominatorTree *DT) {
  /// Recursive helper for hasConcreteDef(). Unfortunately, this currently boils
  /// down to checking that all operands are constant and listing instructions
  /// that may hide undef.
- static bool hasConcreteDefImpl(Value *V, SmallPtrSet<Value*, 8> &Visited,
+ static bool hasConcreteDefImpl(Value *V, SmallPtrSetImpl<Value*> &Visited,
  unsigned Depth) {
  if (isa<Constant>(V))
  return !isa<UndefValue>(V);
@@ -685,7 +685,7 @@ bool LICM::isGuaranteedToExecute(Instruction &Inst) {
  namespace {
  class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
- SmallPtrSet<Value*, 4> &PointerMustAliases;
+ SmallPtrSetImpl<Value*> &PointerMustAliases;
  SmallVectorImpl<BasicBlock*> &LoopExitBlocks;
  SmallVectorImpl<Instruction*> &LoopInsertPts;
  PredIteratorCache &PredCache;
@@ -713,7 +713,7 @@ namespace {

  public:
  LoopPromoter(Value *SP, const SmallVectorImpl<Instruction *> &Insts,
- SSAUpdater &S, SmallPtrSet<Value *, 4> &PMA,
+ SSAUpdater &S, SmallPtrSetImpl<Value *> &PMA,
  SmallVectorImpl<BasicBlock *> &LEB,
  SmallVectorImpl<Instruction *> &LIP, PredIteratorCache &PIC,
  AliasSetTracker &ast, LoopInfo &li, DebugLoc dl, int alignment,
@@ -744,7 +744,7 @@ static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  /// TODO: Allow UDivExpr if we can find an existing IV increment that is an
  /// obvious multiple of the UDivExpr.
  static bool isHighCostExpansion(const SCEV *S,
- SmallPtrSet<const SCEV*, 8> &Processed,
+ SmallPtrSetImpl<const SCEV*> &Processed,
  ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
@@ -892,34 +892,34 @@ public:

  void RateFormula(const TargetTransformInfo &TTI,
  const Formula &F,
- SmallPtrSet<const SCEV *, 16> &Regs,
+ SmallPtrSetImpl<const SCEV *> &Regs,
  const DenseSet<const SCEV *> &VisitedRegs,
  const Loop *L,
  const SmallVectorImpl<int64_t> &Offsets,
  ScalarEvolution &SE, DominatorTree &DT,
  const LSRUse &LU,
- SmallPtrSet<const SCEV *, 16> *LoserRegs = nullptr);
+ SmallPtrSetImpl<const SCEV *> *LoserRegs = nullptr);

  void print(raw_ostream &OS) const;
  void dump() const;

  private:
  void RateRegister(const SCEV *Reg,
- SmallPtrSet<const SCEV *, 16> &Regs,
+ SmallPtrSetImpl<const SCEV *> &Regs,
  const Loop *L,
  ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
- SmallPtrSet<const SCEV *, 16> &Regs,
+ SmallPtrSetImpl<const SCEV *> &Regs,
  const Loop *L,
  ScalarEvolution &SE, DominatorTree &DT,
- SmallPtrSet<const SCEV *, 16> *LoserRegs);
+ SmallPtrSetImpl<const SCEV *> *LoserRegs);
  };

  }

  /// RateRegister - Tally up interesting quantities from the given register.
  void Cost::RateRegister(const SCEV *Reg,
- SmallPtrSet<const SCEV *, 16> &Regs,
+ SmallPtrSetImpl<const SCEV *> &Regs,
  const Loop *L,
  ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
@@ -967,10 +967,10 @@ void Cost::RateRegister(const SCEV *Reg,
  /// before, rate it. Optional LoserRegs provides a way to declare any formula
  /// that refers to one of those regs an instant loser.
  void Cost::RatePrimaryRegister(const SCEV *Reg,
- SmallPtrSet<const SCEV *, 16> &Regs,
+ SmallPtrSetImpl<const SCEV *> &Regs,
  const Loop *L,
  ScalarEvolution &SE, DominatorTree &DT,
- SmallPtrSet<const SCEV *, 16> *LoserRegs) {
+ SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
  Lose();
  return;
@@ -984,13 +984,13 @@ void Cost::RatePrimaryRegister(const SCEV *Reg,

  void Cost::RateFormula(const TargetTransformInfo &TTI,
  const Formula &F,
- SmallPtrSet<const SCEV *, 16> &Regs,
+ SmallPtrSetImpl<const SCEV *> &Regs,
  const DenseSet<const SCEV *> &VisitedRegs,
  const Loop *L,
  const SmallVectorImpl<int64_t> &Offsets,
  ScalarEvolution &SE, DominatorTree &DT,
  const LSRUse &LU,
- SmallPtrSet<const SCEV *, 16> *LoserRegs) {
+ SmallPtrSetImpl<const SCEV *> *LoserRegs) {
  assert(F.isCanonical() && "Cost is accurate only for canonical formula");
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
@@ -2557,7 +2557,7 @@ bool IVChain::isProfitableIncrement(const SCEV *OperExpr,
  ///
  /// TODO: Consider IVInc free if it's already used in another chains.
  static bool
- isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
+ isProfitableChain(IVChain &Chain, SmallPtrSetImpl<Instruction*> &Users,
  ScalarEvolution &SE, const TargetTransformInfo &TTI) {
  if (StressIVChain)
  return true;
@@ -990,7 +990,7 @@ private:
  bool splitAlloca(AllocaInst &AI, AllocaSlices &S);
  bool runOnAlloca(AllocaInst &AI);
  void clobberUse(Use &U);
- void deleteDeadInstructions(SmallPtrSet<AllocaInst *, 4> &DeletedAllocas);
+ void deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
  bool promoteAllocas(Function &F);
  };
  }
@@ -3499,7 +3499,7 @@ bool SROA::runOnAlloca(AllocaInst &AI) {
  ///
  /// We also record the alloca instructions deleted here so that they aren't
  /// subsequently handed to mem2reg to promote.
- void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {
+ void SROA::deleteDeadInstructions(SmallPtrSetImpl<AllocaInst*> &DeletedAllocas) {
  while (!DeadInsts.empty()) {
  Instruction *I = DeadInsts.pop_back_val();
  DEBUG(dbgs() << "Deleting dead instruction: " << *I << "\n");
@@ -3524,7 +3524,7 @@ void SROA::deleteDeadInstructions(SmallPtrSet<AllocaInst*, 4> &DeletedAllocas) {

  static void enqueueUsersInWorklist(Instruction &I,
  SmallVectorImpl<Instruction *> &Worklist,
- SmallPtrSet<Instruction *, 8> &Visited) {
+ SmallPtrSetImpl<Instruction *> &Visited) {
  for (User *U : I.users())
  if (Visited.insert(cast<Instruction>(U)))
  Worklist.push_back(cast<Instruction>(U));
@@ -56,7 +56,7 @@ namespace {
  }
  private:
  bool ProcessBlock(BasicBlock &BB);
- bool SinkInstruction(Instruction *I, SmallPtrSet<Instruction *, 8> &Stores);
+ bool SinkInstruction(Instruction *I, SmallPtrSetImpl<Instruction*> &Stores);
  bool AllUsesDominatedByBlock(Instruction *Inst, BasicBlock *BB) const;
  bool IsAcceptableTarget(Instruction *Inst, BasicBlock *SuccToSinkTo) const;
  };
@@ -157,7 +157,7 @@ bool Sinking::ProcessBlock(BasicBlock &BB) {
  }

  static bool isSafeToMove(Instruction *Inst, AliasAnalysis *AA,
- SmallPtrSet<Instruction *, 8> &Stores) {
+ SmallPtrSetImpl<Instruction *> &Stores) {

  if (Inst->mayWriteToMemory()) {
  Stores.insert(Inst);
@@ -220,7 +220,7 @@ bool Sinking::IsAcceptableTarget(Instruction *Inst,
  /// SinkInstruction - Determine whether it is safe to sink the specified machine
  /// instruction out of its current block into a successor.
  bool Sinking::SinkInstruction(Instruction *Inst,
- SmallPtrSet<Instruction *, 8> &Stores) {
+ SmallPtrSetImpl<Instruction *> &Stores) {

  // Don't sink static alloca instructions. CodeGen assumes allocas outside the
  // entry block are dynamically sized stack objects.
@@ -45,7 +45,7 @@ bool llvm::isSafeToDestroyConstant(const Constant *C) {
  }

  static bool analyzeGlobalAux(const Value *V, GlobalStatus &GS,
- SmallPtrSet<const PHINode *, 16> &PhiUsers) {
+ SmallPtrSetImpl<const PHINode *> &PhiUsers) {
  for (const Use &U : V->uses()) {
  const User *UR = U.getUser();
  if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(UR)) {
@@ -98,7 +98,7 @@ namespace {
  /// split the landing pad block after the landingpad instruction and jump
  /// to there.
  void forwardResume(ResumeInst *RI,
- SmallPtrSet<LandingPadInst*, 16> &InlinedLPads);
+ SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

  /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
  /// destination block for the given basic block, using the values for the
@@ -157,7 +157,7 @@ BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  /// branch. When there is more than one predecessor, we need to split the
  /// landing pad block after the landingpad instruction and jump to there.
  void InvokeInliningInfo::forwardResume(ResumeInst *RI,
- SmallPtrSet<LandingPadInst*, 16> &InlinedLPads) {
+ SmallPtrSetImpl<LandingPadInst*> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

@@ -1178,7 +1178,7 @@ static void changeToCall(InvokeInst *II) {
  }

  static bool markAliveBlocks(BasicBlock *BB,
- SmallPtrSet<BasicBlock*, 128> &Reachable) {
+ SmallPtrSetImpl<BasicBlock*> &Reachable) {

  SmallVector<BasicBlock*, 128> Worklist;
  Worklist.push_back(BB);
@@ -78,7 +78,7 @@ void llvm::appendToGlobalDtors(Module &M, Function *F, int Priority) {
  }

  GlobalVariable *
- llvm::collectUsedGlobalVariables(Module &M, SmallPtrSet<GlobalValue *, 8> &Set,
+ llvm::collectUsedGlobalVariables(Module &M, SmallPtrSetImpl<GlobalValue *> &Set,
  bool CompilerUsed) {
  const char *Name = CompilerUsed ? "llvm.compiler.used" : "llvm.used";
  GlobalVariable *GV = M.getGlobalVariable(Name);
@@ -302,8 +302,8 @@ private:
  void DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
  AllocaInfo &Info);
  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
- const SmallPtrSet<BasicBlock *, 32> &DefBlocks,
- SmallPtrSet<BasicBlock *, 32> &LiveInBlocks);
+ const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
+ SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
  RenamePassData::ValVector &IncVals,
  std::vector<RenamePassData> &Worklist);
@@ -766,8 +766,8 @@ void PromoteMem2Reg::run() {
  /// inserted phi nodes would be dead).
  void PromoteMem2Reg::ComputeLiveInBlocks(
  AllocaInst *AI, AllocaInfo &Info,
- const SmallPtrSet<BasicBlock *, 32> &DefBlocks,
- SmallPtrSet<BasicBlock *, 32> &LiveInBlocks) {
+ const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
+ SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {

  // To determine liveness, we must iterate through the predecessors of blocks
  // where the def is live. Blocks are added to the worklist if we need to
@@ -257,7 +257,7 @@ static unsigned ComputeSpeculationCost(const User *I, const DataLayout *DL) {
  /// V plus its non-dominating operands. If that cost is greater than
  /// CostRemaining, false is returned and CostRemaining is undefined.
  static bool DominatesMergePoint(Value *V, BasicBlock *BB,
- SmallPtrSet<Instruction*, 4> *AggressiveInsts,
+ SmallPtrSetImpl<Instruction*> *AggressiveInsts,
  unsigned &CostRemaining,
  const DataLayout *DL) {
  Instruction *I = dyn_cast<Instruction>(V);
@@ -783,7 +783,7 @@ private:
  /// Return true if all of the instructions in the block can be speculatively
  /// executed. \p SafePtrs is a list of addresses that are known to be legal
  /// and we know that we can read from them without segfault.
- bool blockCanBePredicated(BasicBlock *BB, SmallPtrSet<Value *, 8>& SafePtrs);
+ bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs);

  /// Returns True, if 'Phi' is the kind of reduction variable for type
  /// 'Kind'. If this is a reduction variable, it adds it to ReductionList.
@@ -3541,7 +3541,7 @@ static Type* getWiderType(const DataLayout &DL, Type *Ty0, Type *Ty1) {
  /// \brief Check that the instruction has outside loop users and is not an
  /// identified reduction variable.
  static bool hasOutsideLoopUser(const Loop *TheLoop, Instruction *Inst,
- SmallPtrSet<Value *, 4> &Reductions) {
+ SmallPtrSetImpl<Value *> &Reductions) {
  // Reduction instructions are allowed to have exit users. All other
  // instructions must not have external users.
  if (!Reductions.count(Inst))
@@ -4884,7 +4884,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
  }

  static bool hasMultipleUsesOf(Instruction *I,
- SmallPtrSet<Instruction *, 8> &Insts) {
+ SmallPtrSetImpl<Instruction *> &Insts) {
  unsigned NumUses = 0;
  for(User::op_iterator Use = I->op_begin(), E = I->op_end(); Use != E; ++Use) {
  if (Insts.count(dyn_cast<Instruction>(*Use)))
@@ -4896,7 +4896,7 @@ static bool hasMultipleUsesOf(Instruction *I,
  return false;
  }

- static bool areAllUsesIn(Instruction *I, SmallPtrSet<Instruction *, 8> &Set) {
+ static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set) {
  for(User::op_iterator Use = I->op_begin(), E = I->op_end(); Use != E; ++Use)
  if (!Set.count(dyn_cast<Instruction>(*Use)))
  return false;
@@ -5229,7 +5229,7 @@ bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
  }

  bool LoopVectorizationLegality::blockCanBePredicated(BasicBlock *BB,
- SmallPtrSet<Value *, 8>& SafePtrs) {
+ SmallPtrSetImpl<Value *> &SafePtrs) {
  for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
  // We might be able to hoist the load.
  if (it->mayReadFromMemory()) {
@@ -448,7 +448,7 @@ struct MatchableInfo {
  void formTwoOperandAlias(StringRef Constraint);

  void initialize(const AsmMatcherInfo &Info,
- SmallPtrSet<Record*, 16> &SingletonRegisters,
+ SmallPtrSetImpl<Record*> &SingletonRegisters,
  int AsmVariantNo, std::string &RegisterPrefix);

  /// validate - Return true if this matchable is a valid thing to match against
@@ -644,7 +644,7 @@ private:

  /// buildRegisterClasses - Build the ClassInfo* instances for register
  /// classes.
- void buildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters);
+ void buildRegisterClasses(SmallPtrSetImpl<Record*> &SingletonRegisters);

  /// buildOperandClasses - Build the ClassInfo* instances for user defined
  /// operand classes.
@@ -766,7 +766,7 @@ void MatchableInfo::formTwoOperandAlias(StringRef Constraint) {
  }

  void MatchableInfo::initialize(const AsmMatcherInfo &Info,
- SmallPtrSet<Record*, 16> &SingletonRegisters,
+ SmallPtrSetImpl<Record*> &SingletonRegisters,
  int AsmVariantNo, std::string &RegisterPrefix) {
  AsmVariantID = AsmVariantNo;
  AsmString =
@@ -1075,7 +1075,7 @@ struct LessRegisterSet {
  };

  void AsmMatcherInfo::
- buildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters) {
+ buildRegisterClasses(SmallPtrSetImpl<Record*> &SingletonRegisters) {
  const std::vector<CodeGenRegister*> &Registers =
  Target.getRegBank().getRegisters();
  ArrayRef<CodeGenRegisterClass*> RegClassList =
@@ -1093,7 +1093,7 @@ buildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters) {
  (*it)->getOrder().begin(), (*it)->getOrder().end()));

  // Add any required singleton sets.
- for (SmallPtrSet<Record*, 16>::iterator it = SingletonRegisters.begin(),
+ for (SmallPtrSetImpl<Record*>::iterator it = SingletonRegisters.begin(),
  ie = SingletonRegisters.end(); it != ie; ++it) {
  Record *Rec = *it;
  RegisterSets.insert(RegisterSet(&Rec, &Rec + 1));
@@ -1191,7 +1191,7 @@ buildRegisterClasses(SmallPtrSet<Record*, 16> &SingletonRegisters) {
  RegisterClasses[it->first] = RegisterSetClasses[it->second];

  // Name the register classes which correspond to singleton registers.
- for (SmallPtrSet<Record*, 16>::iterator it = SingletonRegisters.begin(),
+ for (SmallPtrSetImpl<Record*>::iterator it = SingletonRegisters.begin(),
  ie = SingletonRegisters.end(); it != ie; ++it) {
  Record *Rec = *it;
  ClassInfo *CI = RegisterClasses[Rec];