Update and generalize various passes to work on both CFG and ML functions,

simplifying them in minor ways.  The only significant cleanup here
is the constant folding pass.  All the other changes are simple and easy,
but this is still enough to shrink the compiler by 45 LOC.

The one pass left to merge is the CSE pass, which will be more involved, so I'm
splitting it out to its own patch (which I'll tackle right after this).

This is step 28/n towards merging instructions and statements.

PiperOrigin-RevId: 227328115
This commit is contained in:
Chris Lattner 2018-12-30 23:10:35 -08:00 committed by jpienaar
parent 3c8fc797de
commit 7974889f54
14 changed files with 75 additions and 125 deletions

View File

@ -50,8 +50,6 @@ class Function;
/// Returns true on success and false if the replacement is not possible
/// (whenever a memref is used as an operand in a non-dereferencing scenario). See
/// comments at function definition for an example.
// TODO(mlir-team): extend this for Value/ CFGFunctions. Can also be easily
// extended to add additional indices at any position.
bool replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef,
ArrayRef<Value *> extraIndices = {},
AffineMap indexRemap = AffineMap::Null(),
@ -102,7 +100,6 @@ OperationInst *createAffineComputationSlice(OperationInst *opInst);
/// Forward substitutes results from 'AffineApplyOp' into any users which
/// are also AffineApplyOps.
// NOTE: This method may modify users of results of this operation.
// TODO(mlir-team): extend this for Value/ CFGFunctions.
void forwardSubstitute(OpPointer<AffineApplyOp> affineApplyOp);
/// Folds the lower and upper bounds of a 'for' inst to constants if possible.

View File

@ -48,7 +48,7 @@ struct ComposeAffineMaps : public FunctionPass, InstWalker<ComposeAffineMaps> {
using InstListType = llvm::iplist<Instruction>;
void walk(InstListType::iterator Start, InstListType::iterator End);
void visitOperationInst(OperationInst *inst);
PassResult runOnMLFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
using InstWalker<ComposeAffineMaps>::walk;
static char passID;
@ -88,7 +88,7 @@ void ComposeAffineMaps::visitOperationInst(OperationInst *opInst) {
}
}
PassResult ComposeAffineMaps::runOnMLFunction(Function *f) {
PassResult ComposeAffineMaps::runOnFunction(Function *f) {
affineApplyOpsToErase.clear();
walk(f);
for (auto *opInst : affineApplyOpsToErase) {

View File

@ -33,15 +33,12 @@ struct ConstantFold : public FunctionPass, InstWalker<ConstantFold> {
SmallVector<Value *, 8> existingConstants;
// Operations that were folded and that need to be erased.
std::vector<OperationInst *> opInstsToErase;
using ConstantFactoryType = std::function<Value *(Attribute, Type)>;
bool foldOperation(OperationInst *op,
SmallVectorImpl<Value *> &existingConstants,
ConstantFactoryType constantFactory);
SmallVectorImpl<Value *> &existingConstants);
void visitOperationInst(OperationInst *inst);
void visitForInst(ForInst *inst);
PassResult runOnCFGFunction(Function *f) override;
PassResult runOnMLFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
static char passID;
};
@ -52,15 +49,12 @@ char ConstantFold::passID = 0;
/// Attempt to fold the specified operation, updating the IR to match. If
/// constants are found, we keep track of them in the existingConstants list.
///
/// This returns false if the operation was successfully folded.
bool ConstantFold::foldOperation(OperationInst *op,
SmallVectorImpl<Value *> &existingConstants,
ConstantFactoryType constantFactory) {
void ConstantFold::visitOperationInst(OperationInst *op) {
// If this operation is already a constant, just remember it for cleanup
// later, and don't try to fold it.
if (auto constant = op->dyn_cast<ConstantOp>()) {
existingConstants.push_back(constant);
return true;
return;
}
// Check to see if each of the operands is a trivial constant. If so, get
@ -78,7 +72,7 @@ bool ConstantFold::foldOperation(OperationInst *op,
// Attempt to constant fold the operation.
SmallVector<Attribute, 8> resultConstants;
if (op->constantFold(operandConstants, resultConstants))
return true;
return;
// Ok, if everything succeeded, then we can create constants corresponding
// to the result of the call.
@ -87,67 +81,21 @@ bool ConstantFold::foldOperation(OperationInst *op,
assert(resultConstants.size() == op->getNumResults() &&
"constant folding produced the wrong number of results");
FuncBuilder builder(op);
for (unsigned i = 0, e = op->getNumResults(); i != e; ++i) {
auto *res = op->getResult(i);
if (res->use_empty()) // ignore dead uses.
continue;
auto *cst = constantFactory(resultConstants[i], res->getType());
auto cst = builder.create<ConstantOp>(op->getLoc(), resultConstants[i],
res->getType());
existingConstants.push_back(cst);
res->replaceAllUsesWith(cst);
}
return false;
}
// For now, we do a simple top-down pass over a function folding constants. We
// don't handle conditional control flow, constant PHI nodes, folding
// conditional branches, or anything else fancy.
PassResult ConstantFold::runOnCFGFunction(Function *f) {
existingConstants.clear();
FuncBuilder builder(f);
for (auto &bb : *f) {
for (auto instIt = bb.begin(), e = bb.end(); instIt != e;) {
auto *inst = dyn_cast<OperationInst>(&*instIt++);
if (!inst)
continue;
auto constantFactory = [&](Attribute value, Type type) -> Value * {
builder.setInsertionPoint(inst);
return builder.create<ConstantOp>(inst->getLoc(), value, type);
};
if (!foldOperation(inst, existingConstants, constantFactory)) {
// At this point the operation is dead, remove it.
// TODO: This is assuming that all constant foldable operations have no
// side effects. When we have side effect modeling, we should verify
// that the operation is effect-free before we remove it. Until then
// this is close enough.
inst->erase();
}
}
}
// By the time we are done, we may have simplified a bunch of code, leaving
// around dead constants. Check for them now and remove them.
for (auto *cst : existingConstants) {
if (cst->use_empty())
cst->getDefiningInst()->erase();
}
return success();
}
// Override the walker's operation visiter for constant folding.
void ConstantFold::visitOperationInst(OperationInst *inst) {
auto constantFactory = [&](Attribute value, Type type) -> Value * {
FuncBuilder builder(inst);
return builder.create<ConstantOp>(inst->getLoc(), value, type);
};
if (!ConstantFold::foldOperation(inst, existingConstants, constantFactory)) {
opInstsToErase.push_back(inst);
}
// At this point the operation is dead, so we can remove it. We add it to
// a vector to avoid invalidating our walker.
opInstsToErase.push_back(op);
}
// Override the walker's 'for' instruction visit for constant folding.
@ -155,11 +103,15 @@ void ConstantFold::visitForInst(ForInst *forInst) {
constantFoldBounds(forInst);
}
PassResult ConstantFold::runOnMLFunction(Function *f) {
// For now, we do a simple top-down pass over a function folding constants. We
// don't handle conditional control flow, block arguments, folding
// conditional branches, or anything else fancy.
PassResult ConstantFold::runOnFunction(Function *f) {
existingConstants.clear();
opInstsToErase.clear();
walk(f);
// At this point, these operations are dead, remove them.
// TODO: This is assuming that all constant foldable operations have no
// side effects. When we have side effect modeling, we should verify that

View File

@ -70,7 +70,7 @@ namespace {
struct LoopFusion : public FunctionPass {
LoopFusion() : FunctionPass(&LoopFusion::passID) {}
PassResult runOnMLFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
static char passID;
};
@ -519,7 +519,7 @@ public:
} // end anonymous namespace
PassResult LoopFusion::runOnMLFunction(Function *f) {
PassResult LoopFusion::runOnFunction(Function *f) {
MemRefDependenceGraph g;
if (g.init(f))
GreedyFusion(&g).run();

View File

@ -41,7 +41,7 @@ namespace {
/// A pass to perform loop tiling on all suitable loop nests of a Function.
struct LoopTiling : public FunctionPass {
LoopTiling() : FunctionPass(&LoopTiling::passID) {}
PassResult runOnMLFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
constexpr static unsigned kDefaultTileSize = 4;
static char passID;
@ -238,7 +238,7 @@ static void getTileableBands(Function *f,
}
}
PassResult LoopTiling::runOnMLFunction(Function *f) {
PassResult LoopTiling::runOnFunction(Function *f) {
std::vector<SmallVector<ForInst *, 6>> bands;
getTileableBands(f, &bands);

View File

@ -70,7 +70,7 @@ struct LoopUnroll : public FunctionPass {
: FunctionPass(&LoopUnroll::passID), unrollFactor(unrollFactor),
unrollFull(unrollFull), getUnrollFactor(getUnrollFactor) {}
PassResult runOnMLFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
/// Unroll this for inst. Returns false if nothing was done.
bool runOnForInst(ForInst *forInst);
@ -83,7 +83,7 @@ struct LoopUnroll : public FunctionPass {
char LoopUnroll::passID = 0;
PassResult LoopUnroll::runOnMLFunction(Function *f) {
PassResult LoopUnroll::runOnFunction(Function *f) {
// Gathers all innermost loops through a post order pruned walk.
class InnermostLoopGatherer : public InstWalker<InnermostLoopGatherer, bool> {
public:

View File

@ -31,13 +31,11 @@ using namespace mlir;
namespace {
// TODO: This shouldn't be its own pass, it should be a legalization (once we
// have the proper infra).
struct LowerAffineApply : public FunctionPass {
explicit LowerAffineApply() : FunctionPass(&LowerAffineApply::passID) {}
PassResult runOnMLFunction(Function *f) override;
PassResult runOnCFGFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
static char passID;
};
@ -45,28 +43,21 @@ struct LowerAffineApply : public FunctionPass {
char LowerAffineApply::passID = 0;
PassResult LowerAffineApply::runOnMLFunction(Function *f) {
f->emitError("ML Functions contain syntactically hidden affine_apply's that "
"cannot be expanded");
return failure();
}
PassResult LowerAffineApply::runOnFunction(Function *f) {
SmallVector<OpPointer<AffineApplyOp>, 8> affineApplyInsts;
PassResult LowerAffineApply::runOnCFGFunction(Function *f) {
for (Block &bb : *f) {
// Handle iterators with care because we erase in the same loop.
// In particular, step to the next element before erasing the current one.
for (auto it = bb.begin(); it != bb.end();) {
auto *inst = dyn_cast<OperationInst>(&*it++);
if (!inst)
continue;
// Find all the affine_apply operations.
f->walkOps([&](OperationInst *inst) {
auto applyOp = inst->dyn_cast<AffineApplyOp>();
if (applyOp)
affineApplyInsts.push_back(applyOp);
});
auto affineApplyOp = inst->dyn_cast<AffineApplyOp>();
if (!affineApplyOp)
continue;
if (expandAffineApply(&*affineApplyOp))
return failure();
}
}
// Rewrite them in a second pass, avoiding invalidation of the walker
// iterator.
for (auto applyOp : affineApplyInsts)
if (expandAffineApply(applyOp))
return failure();
return success();
}

View File

@ -197,7 +197,7 @@ struct MaterializationState {
struct MaterializeVectorsPass : public FunctionPass {
MaterializeVectorsPass() : FunctionPass(&MaterializeVectorsPass::passID) {}
PassResult runOnMLFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
// Thread-safe RAII contexts local to pass, BumpPtrAllocator freed on exit.
MLFunctionMatcherContext mlContext;
@ -712,7 +712,11 @@ static bool materialize(Function *f,
return false;
}
PassResult MaterializeVectorsPass::runOnMLFunction(Function *f) {
PassResult MaterializeVectorsPass::runOnFunction(Function *f) {
// TODO(ntv): Check to see if this supports arbitrary top-level code.
if (f->getBlocks().size() != 1)
return success();
using matcher::Op;
LLVM_DEBUG(dbgs() << "\nMaterializeVectors on Function\n");
LLVM_DEBUG(f->print(dbgs()));

View File

@ -66,9 +66,7 @@ namespace {
struct MemRefDataFlowOpt : public FunctionPass, InstWalker<MemRefDataFlowOpt> {
explicit MemRefDataFlowOpt() : FunctionPass(&MemRefDataFlowOpt::passID) {}
// Not applicable to CFG functions.
PassResult runOnCFGFunction(Function *f) override { return success(); }
PassResult runOnMLFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
void visitOperationInst(OperationInst *opInst);
@ -210,7 +208,11 @@ void MemRefDataFlowOpt::visitOperationInst(OperationInst *opInst) {
loadOpsToErase.push_back(loadOpInst);
}
PassResult MemRefDataFlowOpt::runOnMLFunction(Function *f) {
PassResult MemRefDataFlowOpt::runOnFunction(Function *f) {
// Only supports single block functions at the moment.
if (f->getBlocks().size() != 1)
return success();
DominanceInfo theDomInfo(f);
domInfo = &theDomInfo;
PostDominanceInfo thePostDomInfo(f);
@ -233,7 +235,7 @@ PassResult MemRefDataFlowOpt::runOnMLFunction(Function *f) {
for (auto *memref : memrefsToErase) {
// If the memref hasn't been alloc'ed in this function, skip.
OperationInst *defInst = memref->getDefiningInst();
if (!defInst || !cast<OperationInst>(defInst)->isa<AllocOp>())
if (!defInst || !defInst->isa<AllocOp>())
// TODO(mlir-team): if the memref was returned by a 'call' instruction, we
// could still erase it if the call has no side-effects.
continue;

View File

@ -41,7 +41,7 @@ namespace {
struct PipelineDataTransfer : public FunctionPass,
InstWalker<PipelineDataTransfer> {
PipelineDataTransfer() : FunctionPass(&PipelineDataTransfer::passID) {}
PassResult runOnMLFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
PassResult runOnForInst(ForInst *forInst);
// Collect all 'for' instructions.
@ -137,7 +137,7 @@ static bool doubleBuffer(Value *oldMemRef, ForInst *forInst) {
}
/// Returns success if the IR is in a valid state.
PassResult PipelineDataTransfer::runOnMLFunction(Function *f) {
PassResult PipelineDataTransfer::runOnFunction(Function *f) {
// Do a post order walk so that inner loop DMAs are processed first. This is
// necessary since 'for' instructions nested within would otherwise become
// invalid (erased) when the outer loop is pipelined (the pipelined one gets

View File

@ -21,7 +21,7 @@
#include "mlir/Analysis/AffineStructures.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/InstVisitor.h"
#include "mlir/IR/Instructions.h"
#include "mlir/Pass.h"
#include "mlir/Transforms/Passes.h"
@ -34,17 +34,13 @@ namespace {
/// Simplifies all affine expressions appearing in the operation instructions of
/// the Function. This is mainly to test the simplifyAffineExpr method.
// TODO(someone): Gradually, extend this to all affine map references found in
// ML functions and CFG functions.
struct SimplifyAffineStructures : public FunctionPass,
InstWalker<SimplifyAffineStructures> {
/// TODO(someone): This should just be defined as a canonicalization pattern
/// on AffineMap and driven from the existing canonicalization pass.
struct SimplifyAffineStructures : public FunctionPass {
explicit SimplifyAffineStructures()
: FunctionPass(&SimplifyAffineStructures::passID) {}
PassResult runOnMLFunction(Function *f) override;
// Does nothing on CFG functions for now. No reusable walkers/visitors exist
// for this yet? TODO(someone).
PassResult runOnCFGFunction(Function *f) override { return success(); }
PassResult runOnFunction(Function *f) override;
void visitIfInst(IfInst *ifInst);
void visitOperationInst(OperationInst *opInst);
@ -86,8 +82,14 @@ void SimplifyAffineStructures::visitOperationInst(OperationInst *opInst) {
}
}
PassResult SimplifyAffineStructures::runOnMLFunction(Function *f) {
walk(f);
PassResult SimplifyAffineStructures::runOnFunction(Function *f) {
f->walkInsts([&](Instruction *inst) {
if (auto *opInst = dyn_cast<OperationInst>(inst))
visitOperationInst(opInst);
if (auto *ifInst = dyn_cast<IfInst>(inst))
visitIfInst(ifInst);
});
return success();
}

View File

@ -60,8 +60,6 @@ static bool isMemRefDereferencingOp(const OperationInst &op) {
// extra operands, note that 'indexRemap' would just be applied to the existing
// indices (%i, %j).
//
// TODO(mlir-team): extend this for CFG Functions. Can also be easily
// extended to add additional indices at any position.
bool mlir::replaceAllMemRefUsesWith(const Value *oldMemRef, Value *newMemRef,
ArrayRef<Value *> extraIndices,
AffineMap indexRemap,

View File

@ -73,7 +73,7 @@ struct VectorizerTestPass : public FunctionPass {
static constexpr auto kTestAffineMapAttrName = "affine_map";
VectorizerTestPass() : FunctionPass(&VectorizerTestPass::passID) {}
PassResult runOnMLFunction(Function *f) override;
PassResult runOnFunction(Function *f) override;
void testVectorShapeRatio(Function *f);
void testForwardSlicing(Function *f);
void testBackwardSlicing(Function *f);
@ -218,7 +218,11 @@ void VectorizerTestPass::testComposeMaps(Function *f) {
res.print(outs() << "\nComposed map: ");
}
PassResult VectorizerTestPass::runOnMLFunction(Function *f) {
PassResult VectorizerTestPass::runOnFunction(Function *f) {
// Only support single block functions at this point.
if (f->getBlocks().size() != 1)
return success();
if (!clTestVectorShapeRatio.empty()) {
testVectorShapeRatio(f);
}

View File

@ -78,7 +78,7 @@ struct PrintCFGPass : public FunctionPass {
const llvm::Twine &title = "")
: FunctionPass(&PrintCFGPass::passID), os(os), shortNames(shortNames),
title(title) {}
PassResult runOnCFGFunction(Function *function) override {
PassResult runOnFunction(Function *function) override {
mlir::writeGraph(os, function, shortNames, title);
return success();
}