forked from OSchip/llvm-project
Give helpers internal linkage. NFC.
parent f8cccd126b
commit 350dadaa8a
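Every hunk below makes the same NFC change: helpers that are only referenced from within their own .cpp file are given internal linkage, either by marking a free function `static` or by wrapping a file-local class in an anonymous namespace, in line with the LLVM coding standards. As a rough illustration of the free-function case, here is a minimal sketch with made-up names; it is not code from this commit.

// helper.cpp -- illustrative sketch only; the function name is hypothetical.
//
// Without `static`, the helper has external linkage: its symbol is exported
// from the object file and can collide with a same-named helper elsewhere.
// With `static`, it has internal linkage: private to this translation unit,
// flagged by -Wunused-function if it ever becomes dead, and easier for the
// optimizer to inline or remove outright.
static int clampToByte(int v) {
  if (v < 0)
    return 0;
  return v > 255 ? 255 : v;
}

int main() { return clampToByte(300) == 255 ? 0 : 1; }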
@@ -568,21 +568,6 @@ StdLibraryFunctionsChecker::findFunctionSummary(const CallEvent &Call,
   return findFunctionSummary(FD, C);
 }

-llvm::Optional<const FunctionDecl *>
-lookupGlobalCFunction(StringRef Name, const ASTContext &ACtx) {
-  IdentifierInfo &II = ACtx.Idents.get(Name);
-  auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
-  if (LookupRes.size() == 0)
-    return None;
-
-  assert(LookupRes.size() == 1 && "In C, identifiers should be unique");
-  Decl *D = LookupRes.front()->getCanonicalDecl();
-  auto *FD = dyn_cast<FunctionDecl>(D);
-  if (!FD)
-    return None;
-  return FD->getCanonicalDecl();
-}
-
 void StdLibraryFunctionsChecker::initFunctionSummaries(
     CheckerContext &C) const {
   if (!FunctionSummaryMap.empty())
@@ -1202,7 +1202,7 @@ template <> struct DenseMapInfo<PrivateMethodKey> {
 };
 } // end namespace llvm

-const ObjCMethodDecl *
+static const ObjCMethodDecl *
 lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
                         Selector LookupSelector, bool InstanceMethod) {
   // Repeatedly calling lookupPrivateMethod() is expensive, especially
@@ -1246,12 +1246,12 @@ static Value *getAISize(LLVMContext &Context, Value *Amt) {
   return Amt;
 }

-Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
+static Align computeAllocaDefaultAlign(Type *Ty, BasicBlock *BB) {
   const DataLayout &DL = BB->getModule()->getDataLayout();
   return DL.getPrefTypeAlign(Ty);
 }

-Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
+static Align computeAllocaDefaultAlign(Type *Ty, Instruction *I) {
   return computeAllocaDefaultAlign(Ty, I->getParent());
 }

@@ -1333,12 +1333,12 @@ void LoadInst::AssertOK() {
          "Alignment required for atomic load");
 }

-Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
+static Align computeLoadStoreDefaultAlign(Type *Ty, BasicBlock *BB) {
   const DataLayout &DL = BB->getModule()->getDataLayout();
   return DL.getABITypeAlign(Ty);
 }

-Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
+static Align computeLoadStoreDefaultAlign(Type *Ty, Instruction *I) {
   return computeLoadStoreDefaultAlign(Ty, I->getParent());
 }

@@ -960,9 +960,9 @@ void ARMExpandPseudo::ExpandMOV32BitImm(MachineBasicBlock &MBB,
 // S0-S31 + FPSCR + 8 more bytes (VPR + pad, or just pad)
 static const int CMSE_FP_SAVE_SIZE = 136;

-void determineGPRegsToClear(const MachineInstr &MI,
-                            const std::initializer_list<unsigned> &Regs,
-                            SmallVectorImpl<unsigned> &ClearRegs) {
+static void determineGPRegsToClear(const MachineInstr &MI,
+                                   const std::initializer_list<unsigned> &Regs,
+                                   SmallVectorImpl<unsigned> &ClearRegs) {
   SmallVector<unsigned, 4> OpRegs;
   for (const MachineOperand &Op : MI.operands()) {
     if (!Op.isReg() || !Op.isUse())
@@ -3330,6 +3330,7 @@ static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
 }
 #endif

+namespace {
 /// This is a helper class for lowering variable arguments parameters.
 class VarArgsLoweringHelper {
 public:
@@ -3367,6 +3368,7 @@ private:
   CallingConv::ID CallConv;
   CCState &CCInfo;
 };
+} // namespace

 void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
     SDValue &Chain, unsigned StackSize) {
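The two hunks above show the class-side variant of the cleanup, which several later hunks repeat: a class definition cannot be declared `static`, so file-local helper classes such as VarArgsLoweringHelper are enclosed in an anonymous namespace instead, which gives everything declared inside it internal linkage. A minimal, hypothetical sketch of that pattern (not code from this commit):

// local_class.cpp -- illustrative sketch only; the class name is made up.

namespace {
// Declared in an unnamed namespace, this type is visible only inside this
// translation unit and cannot clash with a same-named type in another file.
class Accumulator {
public:
  void add(int v) { total += v; }
  int get() const { return total; }

private:
  int total = 0;
};
} // namespace

int main() {
  Accumulator acc;
  acc.add(20);
  acc.add(22);
  return acc.get() == 42 ? 0 : 1;
}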
@@ -447,6 +447,7 @@ PreservedAnalyses AssumeSimplifyPass::run(Function &F,
   return PreservedAnalyses::all();
 }

+namespace {
 class AssumeSimplifyPassLegacyPass : public FunctionPass {
 public:
   static char ID;
@@ -469,6 +470,7 @@ public:
     AU.setPreservesAll();
   }
 };
+} // namespace

 char AssumeSimplifyPassLegacyPass::ID = 0;

@@ -35,6 +35,7 @@ using namespace mlir::edsc::intrinsics;
 using vector::TransferReadOp;
 using vector::TransferWriteOp;

+namespace {
 /// Helper class captures the common information needed to lower N>1-D vector
 /// transfer operations (read and write).
 /// On construction, this class opens an edsc::ScopedContext for simpler IR
@@ -132,6 +133,7 @@ private:
   VectorType minorVectorType;       // vector<(minor_dims) x type>
   MemRefType memRefMinorVectorType; // memref<vector<(minor_dims) x type>>
 };
+} // namespace

 template <typename ConcreteOp>
 template <typename Lambda>
@@ -181,8 +181,8 @@ bool mlir::isValidDim(Value value, Region *region) {
 /// `memrefDefOp` is a statically shaped one or defined using a valid symbol
 /// for `region`.
 template <typename AnyMemRefDefOp>
-bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index,
-                             Region *region) {
+static bool isMemRefSizeValidSymbol(AnyMemRefDefOp memrefDefOp, unsigned index,
+                                    Region *region) {
   auto memRefType = memrefDefOp.getType();
   // Statically shaped.
   if (!memRefType.isDynamicDim(index))
@@ -1882,7 +1882,8 @@ void AffineLoadOp::build(OpBuilder &builder, OperationState &result,
   build(builder, result, memref, map, indices);
 }

-ParseResult parseAffineLoadOp(OpAsmParser &parser, OperationState &result) {
+static ParseResult parseAffineLoadOp(OpAsmParser &parser,
+                                     OperationState &result) {
   auto &builder = parser.getBuilder();
   auto indexTy = builder.getIndexType();

@@ -1902,7 +1903,7 @@ ParseResult parseAffineLoadOp(OpAsmParser &parser, OperationState &result) {
       parser.addTypeToList(type.getElementType(), result.types));
 }

-void print(OpAsmPrinter &p, AffineLoadOp op) {
+static void print(OpAsmPrinter &p, AffineLoadOp op) {
   p << "affine.load " << op.getMemRef() << '[';
   if (AffineMapAttr mapAttr =
           op.getAttrOfType<AffineMapAttr>(op.getMapAttrName()))
@@ -1995,7 +1996,8 @@ void AffineStoreOp::build(OpBuilder &builder, OperationState &result,
   build(builder, result, valueToStore, memref, map, indices);
 }

-ParseResult parseAffineStoreOp(OpAsmParser &parser, OperationState &result) {
+static ParseResult parseAffineStoreOp(OpAsmParser &parser,
+                                      OperationState &result) {
   auto indexTy = parser.getBuilder().getIndexType();

   MemRefType type;
@@ -2016,7 +2018,7 @@ ParseResult parseAffineStoreOp(OpAsmParser &parser, OperationState &result) {
       parser.resolveOperands(mapOperands, indexTy, result.operands));
 }

-void print(OpAsmPrinter &p, AffineStoreOp op) {
+static void print(OpAsmPrinter &p, AffineStoreOp op) {
   p << "affine.store " << op.getValueToStore();
   p << ", " << op.getMemRef() << '[';
   if (AffineMapAttr mapAttr =
@@ -2104,7 +2106,7 @@ static ParseResult parseAffineMinMaxOp(OpAsmParser &parser,
 /// list may contain nulls, which are interpreted as the operand not being a
 /// constant.
 template <typename T>
-OpFoldResult foldMinMaxOp(T op, ArrayRef<Attribute> operands) {
+static OpFoldResult foldMinMaxOp(T op, ArrayRef<Attribute> operands) {
   static_assert(llvm::is_one_of<T, AffineMinOp, AffineMaxOp>::value,
                 "expected affine min or max op");

@@ -2499,8 +2501,8 @@ static ParseResult parseAffineParallelOp(OpAsmParser &parser,
 // AffineVectorLoadOp
 //===----------------------------------------------------------------------===//

-ParseResult parseAffineVectorLoadOp(OpAsmParser &parser,
-                                    OperationState &result) {
+static ParseResult parseAffineVectorLoadOp(OpAsmParser &parser,
+                                           OperationState &result) {
   auto &builder = parser.getBuilder();
   auto indexTy = builder.getIndexType();

@@ -2522,7 +2524,7 @@ ParseResult parseAffineVectorLoadOp(OpAsmParser &parser,
       parser.addTypeToList(resultType, result.types));
 }

-void print(OpAsmPrinter &p, AffineVectorLoadOp op) {
+static void print(OpAsmPrinter &p, AffineVectorLoadOp op) {
   p << "affine.vector_load " << op.getMemRef() << '[';
   if (AffineMapAttr mapAttr =
           op.getAttrOfType<AffineMapAttr>(op.getMapAttrName()))
@@ -2563,8 +2565,8 @@ static LogicalResult verify(AffineVectorLoadOp op) {
 // AffineVectorStoreOp
 //===----------------------------------------------------------------------===//

-ParseResult parseAffineVectorStoreOp(OpAsmParser &parser,
-                                     OperationState &result) {
+static ParseResult parseAffineVectorStoreOp(OpAsmParser &parser,
+                                            OperationState &result) {
   auto indexTy = parser.getBuilder().getIndexType();

   MemRefType memrefType;
@@ -2587,7 +2589,7 @@ ParseResult parseAffineVectorStoreOp(OpAsmParser &parser,
       parser.resolveOperands(mapOperands, indexTy, result.operands));
 }

-void print(OpAsmPrinter &p, AffineVectorStoreOp op) {
+static void print(OpAsmPrinter &p, AffineVectorStoreOp op) {
   p << "affine.vector_store " << op.getValueToStore();
   p << ", " << op.getMemRef() << '[';
   if (AffineMapAttr mapAttr =
@@ -2727,6 +2727,7 @@ bool mlir::canFoldIntoConsumerOp(MemRefCastOp castOp) {
   return true;
 }

+namespace {
 /// Pattern to rewrite a subview op with MemRefCast arguments.
 /// This essentially pushes memref_cast past its consuming subview when
 /// `canFoldIntoConsumerOp` is true.
@@ -2779,6 +2780,7 @@ public:
     return success();
   }
 };
+} // namespace

 void SubViewOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
                                             MLIRContext *context) {
@@ -1338,7 +1338,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result,
 }

 template <typename TransferOp>
-void printTransferAttrs(OpAsmPrinter &p, TransferOp op) {
+static void printTransferAttrs(OpAsmPrinter &p, TransferOp op) {
   SmallVector<StringRef, 2> elidedAttrs;
   if (op.permutation_map() == TransferOp::getTransferMinorIdentityMap(
                                   op.getMemRefType(), op.getVectorType()))
@@ -21,7 +21,8 @@ struct TestMatchers : public PassWrapper<TestMatchers, FunctionPass> {
 } // end anonymous namespace

 // This could be done better but is not worth the variadic template trouble.
-template <typename Matcher> unsigned countMatches(FuncOp f, Matcher &matcher) {
+template <typename Matcher>
+static unsigned countMatches(FuncOp f, Matcher &matcher) {
   unsigned count = 0;
   f.walk([&count, &matcher](Operation *op) {
     if (matcher.match(op))
@@ -156,7 +156,7 @@ static void applyPatterns(FuncOp funcOp) {
   });
 }

-OwningRewritePatternList
+static OwningRewritePatternList
 getMatmulToVectorCanonicalizationPatterns(MLIRContext *context) {
   OwningRewritePatternList patterns;
   AffineApplyOp::getCanonicalizationPatterns(patterns, context);
@@ -169,7 +169,7 @@ getMatmulToVectorCanonicalizationPatterns(MLIRContext *context) {
   return patterns;
 }

-void fillL1TilingAndMatmulToVectorPatterns(
+static void fillL1TilingAndMatmulToVectorPatterns(
     MLIRContext *context, StringRef startMarker,
     SmallVectorImpl<OwningRewritePatternList> &patternsVector) {
   patternsVector.emplace_back(LinalgTilingPattern<MatmulOp>(