Automated rollback of changelist 247778391.

PiperOrigin-RevId: 247778691
MLIR Team authored on 2019-05-11 15:24:47 -07:00; committed by Mehdi Amini
parent 02e03b9bf4
commit 41d90a85bd
43 changed files with 130 additions and 140 deletions
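Nearly every hunk below applies the same mechanical substitution. Judging from the Operation.h hunks, which re-add the member dyn_cast / dyn_cast_or_null / isa_and_nonnull templates and drop the llvm::isa_impl and cast_retty specializations, the rollback restores the member-style casts on mlir::Operation and removes the free-function style that the rolled-back changelist had introduced. A minimal sketch of the two styles (not code from this commit; OpTy stands for any registered op class such as LoadOp):

#include "mlir/IR/Operation.h"

// Sketch only. OpTy stands for any registered op class (LoadOp, AffineForOp, ...).
template <typename OpTy>
OpTy getIfOfType(mlir::Operation *op) {
  // Free-function style removed by this rollback; it relied on the
  // llvm::isa_impl / cast_retty specializations deleted from Operation.h:
  //   if (auto typed = llvm::dyn_cast<OpTy>(op)) return typed;

  // Member style restored by this rollback:
  if (op->isa<OpTy>())            // boolean check
    return op->cast<OpTy>();      // asserts that op really is an OpTy
  return op->dyn_cast<OpTy>();    // returns a null OpTy on mismatch
}

This is why each call site below switches from dyn_cast<SomeOp>(op) back to op->dyn_cast<SomeOp>().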


@ -31,7 +31,7 @@ ViewOp linalg::getViewBaseViewOp(Value *view) {
auto viewType = view->getType().dyn_cast<ViewType>();
(void)viewType;
assert(viewType.isa<ViewType>() && "expected a ViewType");
while (auto slice = dyn_cast<SliceOp>(view->getDefiningOp())) {
while (auto slice = view->getDefiningOp()->dyn_cast<SliceOp>()) {
view = slice.getParentView();
assert(viewType.isa<ViewType>() && "expected a ViewType");
}
@ -48,7 +48,7 @@ std::pair<mlir::Value *, unsigned> linalg::getViewRootIndexing(Value *view,
(void)viewType;
assert(viewType.isa<ViewType>() && "expected a ViewType");
assert(dim < viewType.getRank() && "dim exceeds rank");
if (auto viewOp = dyn_cast<ViewOp>(view->getDefiningOp()))
if (auto viewOp = view->getDefiningOp()->dyn_cast<ViewOp>())
return std::make_pair(viewOp.getIndexing(dim), dim);
auto sliceOp = view->getDefiningOp()->cast<SliceOp>();


@ -40,7 +40,7 @@ linalg::common::LoopNestRangeBuilder::LoopNestRangeBuilder(
assert(ivs.size() == indexings.size());
for (unsigned i = 0, e = indexings.size(); i < e; ++i) {
auto rangeOp =
llvm::dyn_cast<RangeOp>(indexings[i].getValue()->getDefiningOp());
indexings[i].getValue()->getDefiningOp()->dyn_cast<RangeOp>();
if (!rangeOp) {
continue;
}


@ -33,7 +33,7 @@ using namespace linalg::intrinsics;
unsigned linalg::getViewRank(Value *view) {
assert(view->getType().isa<ViewType>() && "expected a ViewType");
if (auto viewOp = dyn_cast<ViewOp>(view->getDefiningOp()))
if (auto viewOp = view->getDefiningOp()->dyn_cast<ViewOp>())
return viewOp.getRank();
return view->getDefiningOp()->cast<SliceOp>().getRank();
}


@ -43,7 +43,7 @@ using namespace linalg::intrinsics;
// analyses. This builds the chain.
static SmallVector<Value *, 8> getViewChain(mlir::Value *v) {
assert(v->getType().isa<ViewType>() && "ViewType expected");
if (v->getDefiningOp()->isa<ViewOp>()) {
if (v->getDefiningOp()->dyn_cast<ViewOp>()) {
return SmallVector<mlir::Value *, 8>{v};
}
@ -53,7 +53,7 @@ static SmallVector<Value *, 8> getViewChain(mlir::Value *v) {
tmp.push_back(v);
v = sliceOp.getParentView();
} while (!v->getType().isa<ViewType>());
assert(v->getDefiningOp()->isa<ViewOp>() && "must be a ViewOp");
assert(v->getDefiningOp()->cast<ViewOp>() && "must be a ViewOp");
tmp.push_back(v);
return SmallVector<mlir::Value *, 8>(tmp.rbegin(), tmp.rend());
}


@ -91,7 +91,7 @@ inline llvm::SmallVector<mlir::Value *, 8>
extractRangesFromViewOrSliceOp(mlir::Value *view) {
// This expects a viewType which must come from either ViewOp or SliceOp.
assert(view->getType().isa<linalg::ViewType>() && "expected ViewType");
if (auto viewOp = llvm::dyn_cast<linalg::ViewOp>(view->getDefiningOp()))
if (auto viewOp = view->getDefiningOp()->dyn_cast<linalg::ViewOp>())
return viewOp.getRanges();
auto sliceOp = view->getDefiningOp()->cast<linalg::SliceOp>();


@ -46,9 +46,9 @@ void linalg::composeSliceOps(mlir::Function *f) {
void linalg::lowerToFinerGrainedTensorContraction(mlir::Function *f) {
f->walk([](Operation *op) {
if (auto matmulOp = dyn_cast<linalg::MatmulOp>(op)) {
if (auto matmulOp = op->dyn_cast<linalg::MatmulOp>()) {
matmulOp.writeAsFinerGrainTensorContraction();
} else if (auto matvecOp = dyn_cast<linalg::MatvecOp>(op)) {
} else if (auto matvecOp = op->dyn_cast<linalg::MatvecOp>()) {
matvecOp.writeAsFinerGrainTensorContraction();
} else {
return;
@ -205,11 +205,11 @@ writeContractionAsLoops(ContractionOp contraction) {
llvm::Optional<SmallVector<mlir::AffineForOp, 4>>
linalg::writeAsLoops(Operation *op) {
if (auto matmulOp = dyn_cast<linalg::MatmulOp>(op)) {
if (auto matmulOp = op->dyn_cast<linalg::MatmulOp>()) {
return writeContractionAsLoops(matmulOp);
} else if (auto matvecOp = dyn_cast<linalg::MatvecOp>(op)) {
} else if (auto matvecOp = op->dyn_cast<linalg::MatvecOp>()) {
return writeContractionAsLoops(matvecOp);
} else if (auto dotOp = dyn_cast<linalg::DotOp>(op)) {
} else if (auto dotOp = op->dyn_cast<linalg::DotOp>()) {
return writeContractionAsLoops(dotOp);
}
return llvm::None;
@ -276,7 +276,7 @@ PatternMatchResult
Rewriter<linalg::LoadOp>::matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const {
auto load = op->cast<linalg::LoadOp>();
SliceOp slice = dyn_cast<SliceOp>(load.getView()->getDefiningOp());
SliceOp slice = load.getView()->getDefiningOp()->dyn_cast<SliceOp>();
ViewOp view = slice ? emitAndReturnFullyComposedView(slice.getResult())
: load.getView()->getDefiningOp()->cast<ViewOp>();
ScopedContext scope(FuncBuilder(load), load.getLoc());
@ -291,7 +291,7 @@ PatternMatchResult
Rewriter<linalg::StoreOp>::matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const {
auto store = op->cast<linalg::StoreOp>();
SliceOp slice = dyn_cast<SliceOp>(store.getView()->getDefiningOp());
SliceOp slice = store.getView()->getDefiningOp()->dyn_cast<SliceOp>();
ViewOp view = slice ? emitAndReturnFullyComposedView(slice.getResult())
: store.getView()->getDefiningOp()->cast<ViewOp>();
ScopedContext scope(FuncBuilder(store), store.getLoc());


@ -52,8 +52,8 @@ void linalg::lowerToTiledLoops(mlir::Function *f,
}
static bool isZeroIndex(Value *v) {
return isa_and_nonnull<ConstantIndexOp>(v->getDefiningOp()) &&
cast<ConstantIndexOp>(v->getDefiningOp()).getValue() == 0;
return v->getDefiningOp() && v->getDefiningOp()->isa<ConstantIndexOp>() &&
v->getDefiningOp()->dyn_cast<ConstantIndexOp>().getValue() == 0;
}
template <typename ConcreteOp>
@ -178,11 +178,11 @@ writeContractionAsTiledViews(TensorContractionBase<ConcreteOp> &contraction,
llvm::Optional<SmallVector<mlir::AffineForOp, 8>>
linalg::writeAsTiledViews(Operation *op, ArrayRef<Value *> tileSizes) {
if (auto matmulOp = dyn_cast<linalg::MatmulOp>(op)) {
if (auto matmulOp = op->dyn_cast<linalg::MatmulOp>()) {
return writeContractionAsTiledViews(matmulOp, tileSizes);
} else if (auto matvecOp = dyn_cast<linalg::MatvecOp>(op)) {
} else if (auto matvecOp = op->dyn_cast<linalg::MatvecOp>()) {
return writeContractionAsTiledViews(matvecOp, tileSizes);
} else if (auto dotOp = dyn_cast<linalg::DotOp>(op)) {
} else if (auto dotOp = op->dyn_cast<linalg::DotOp>()) {
return writeContractionAsTiledViews(dotOp, tileSizes);
}
return llvm::None;
@ -190,11 +190,11 @@ linalg::writeAsTiledViews(Operation *op, ArrayRef<Value *> tileSizes) {
void linalg::lowerToTiledViews(mlir::Function *f, ArrayRef<Value *> tileSizes) {
f->walk([tileSizes](Operation *op) {
if (auto matmulOp = dyn_cast<linalg::MatmulOp>(op)) {
if (auto matmulOp = op->dyn_cast<linalg::MatmulOp>()) {
writeAsTiledViews(matmulOp, tileSizes);
} else if (auto matvecOp = dyn_cast<linalg::MatvecOp>(op)) {
} else if (auto matvecOp = op->dyn_cast<linalg::MatvecOp>()) {
writeAsTiledViews(matvecOp, tileSizes);
} else if (auto dotOp = dyn_cast<linalg::DotOp>(op)) {
} else if (auto dotOp = op->dyn_cast<linalg::DotOp>()) {
writeAsTiledViews(dotOp, tileSizes);
} else {
return;


@ -238,13 +238,13 @@ public:
LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
// The add operation is trivial: propagate the input type as is.
if (auto addOp = llvm::dyn_cast<AddOp>(op)) {
if (auto addOp = op->dyn_cast<AddOp>()) {
op->getResult(0)->setType(op->getOperand(0)->getType());
continue;
}
// Transpose is easy: just invert the dimensions.
if (auto transpose = llvm::dyn_cast<TransposeOp>(op)) {
if (auto transpose = op->dyn_cast<TransposeOp>()) {
SmallVector<int64_t, 2> dims;
auto arrayTy = transpose.getOperand()->getType().cast<ToyArrayType>();
dims.insert(dims.end(), arrayTy.getShape().begin(),
@ -259,7 +259,7 @@ public:
// catch it but shape inference earlier in the pass could generate an
// invalid IR (from an invalid Toy input of course) and we wouldn't want
// to crash here.
if (auto mulOp = llvm::dyn_cast<MulOp>(op)) {
if (auto mulOp = op->dyn_cast<MulOp>()) {
auto lhs = mulOp.getLHS()->getType().cast<ToyArrayType>();
auto rhs = mulOp.getRHS()->getType().cast<ToyArrayType>();
auto lhsRank = lhs.getShape().size();
@ -291,7 +291,7 @@ public:
// for this function, queue the callee in the inter-procedural work list,
// and return. The current function stays in the work list and will
// restart after the callee is processed.
if (auto callOp = llvm::dyn_cast<GenericCallOp>(op)) {
if (auto callOp = op->dyn_cast<GenericCallOp>()) {
auto calleeName = callOp.getCalleeName();
auto *callee = getModule().getNamedFunction(calleeName);
if (!callee) {

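The comments in the hunk above describe the inter-procedural part of the Toy shape-inference pass: when a generic call to a not-yet-processed callee is found, the callee is queued and the current function stays in the work list so it restarts later. A rough sketch of that work-list behavior, with firstUnresolvedCallee and inferShapesIn as made-up placeholders for the real pass internals:

#include <deque>
#include "mlir/IR/Function.h"

// Hypothetical helpers standing in for the pass internals.
mlir::Function *firstUnresolvedCallee(mlir::Function *f);
void inferShapesIn(mlir::Function *f);

void runInterproceduralInference(mlir::Function *entry) {
  std::deque<mlir::Function *> worklist{entry};
  while (!worklist.empty()) {
    mlir::Function *f = worklist.front();
    if (mlir::Function *callee = firstUnresolvedCallee(f)) {
      // Queue the callee first; f stays in the work list and restarts
      // once the callee has been processed.
      worklist.push_front(callee);
      continue;
    }
    inferShapesIn(f);     // all callees resolved; infer shapes locally
    worklist.pop_front();
  }
}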

@ -53,7 +53,7 @@ struct SimplifyRedundantTranspose : public mlir::RewritePattern {
// Look through the input of the current transpose.
mlir::Value *transposeInput = transpose.getOperand();
TransposeOp transposeInputOp =
llvm::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
mlir::dyn_cast_or_null<TransposeOp>(transposeInput->getDefiningOp());
// If the input is defined by another Transpose, bingo!
if (!transposeInputOp)
return matchFailure();
@ -75,7 +75,7 @@ struct SimplifyReshapeConstant : public mlir::RewritePattern {
mlir::PatternRewriter &rewriter) const override {
ReshapeOp reshape = op->cast<ReshapeOp>();
// Look through the input of the current reshape.
ConstantOp constantOp = llvm::dyn_cast_or_null<ConstantOp>(
ConstantOp constantOp = mlir::dyn_cast_or_null<ConstantOp>(
reshape.getOperand()->getDefiningOp());
// If the input is defined by another constant, bingo!
if (!constantOp)


@ -366,7 +366,7 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
// First patch calls type to return memref instead of ToyArray
for (auto &function : getModule()) {
function.walk([&](Operation *op) {
auto callOp = dyn_cast<CallOp>(op);
auto callOp = op->dyn_cast<CallOp>();
if (!callOp)
return;
if (!callOp.getNumResults())
@ -382,14 +382,14 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
for (auto &function : getModule()) {
function.walk([&](Operation *op) {
// Turns toy.alloc into sequence of alloc/dealloc (later malloc/free).
if (auto allocOp = dyn_cast<toy::AllocOp>(op)) {
if (auto allocOp = op->dyn_cast<toy::AllocOp>()) {
auto result = allocTensor(allocOp);
allocOp.replaceAllUsesWith(result);
allocOp.erase();
return;
}
// Eliminate all type.cast before lowering to LLVM.
if (auto typeCastOp = dyn_cast<toy::TypeCastOp>(op)) {
if (auto typeCastOp = op->dyn_cast<toy::TypeCastOp>()) {
typeCastOp.replaceAllUsesWith(typeCastOp.getOperand());
typeCastOp.erase();
return;
@ -429,7 +429,7 @@ struct LateLoweringPass : public ModulePass<LateLoweringPass> {
// Insert a `dealloc` operation right before the `return` operations, unless
// it is returned itself in which case the caller is responsible for it.
builder.getFunction()->walk([&](Operation *op) {
auto returnOp = dyn_cast<ReturnOp>(op);
auto returnOp = op->dyn_cast<ReturnOp>();
if (!returnOp)
return;
if (returnOp.getNumOperands() && returnOp.getOperand(0) == alloc)


@ -238,7 +238,7 @@ public:
LLVM_DEBUG(llvm::dbgs() << "Inferring shape for: " << *op << "\n");
// The add operation is trivial: propagate the input type as is.
if (auto addOp = llvm::dyn_cast<AddOp>(op)) {
if (auto addOp = op->dyn_cast<AddOp>()) {
op->getResult(0)->setType(op->getOperand(0)->getType());
continue;
}
@ -261,7 +261,7 @@ public:
// catch it but shape inference earlier in the pass could generate an
// invalid IR (from an invalid Toy input of course) and we wouldn't want
// to crash here.
if (auto mulOp = llvm::dyn_cast<MulOp>(op)) {
if (auto mulOp = op->dyn_cast<MulOp>()) {
auto lhs = mulOp.getLHS()->getType().cast<ToyArrayType>();
auto rhs = mulOp.getRHS()->getType().cast<ToyArrayType>();
auto lhsRank = lhs.getShape().size();
@ -295,7 +295,7 @@ public:
// for this function, queue the callee in the inter-procedural work list,
// and return. The current function stays in the work list and will
// restart after the callee is processed.
if (auto callOp = llvm::dyn_cast<GenericCallOp>(op)) {
if (auto callOp = op->dyn_cast<GenericCallOp>()) {
auto calleeName = callOp.getCalleeName();
auto *callee = getModule().getNamedFunction(calleeName);
if (!callee) {


@ -439,7 +439,7 @@ ValueHandle ValueHandle::create(Args... args) {
if (op->getNumResults() == 1) {
return ValueHandle(op->getResult(0));
} else if (op->getNumResults() == 0) {
if (auto f = dyn_cast<AffineForOp>(op)) {
if (auto f = op->dyn_cast<AffineForOp>()) {
return ValueHandle(f.getInductionVar());
}
}


@ -271,7 +271,7 @@ public:
OperationState state(getContext(), location, OpTy::getOperationName());
OpTy::build(this, &state, args...);
auto *op = createOperation(state);
auto result = dyn_cast<OpTy>(op);
auto result = op->dyn_cast<OpTy>();
assert(result && "Builder didn't return the right type");
return result;
}


@ -116,7 +116,7 @@ public:
/// Specialization of walk to only visit operations of 'OpTy'.
template <typename OpTy> void walk(std::function<void(OpTy)> callback) {
walk([&](Operation *opInst) {
if (auto op = dyn_cast<OpTy>(opInst))
if (auto op = opInst->dyn_cast<OpTy>())
callback(op);
});
}

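The typed walk in the hunk above only invokes the callback for operations of the requested kind. A small usage sketch, assuming this overload lives on mlir::Function as the other hunks in this commit suggest (the explicit template argument is needed because OpTy cannot be deduced from a lambda):

#include "mlir/IR/Function.h"
#include "mlir/StandardOps/Ops.h"  // assumed location of LoadOp at this revision

// Count the LoadOps in a function using the typed walk (illustrative only).
unsigned countLoads(mlir::Function &f) {
  unsigned numLoads = 0;
  f.walk<mlir::LoadOp>([&](mlir::LoadOp) { ++numLoads; });
  return numLoads;
}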

@ -792,7 +792,7 @@ public:
/// This is the hook used by the AsmPrinter to emit this to the .mlir file.
/// Op implementations should provide a print method.
static void printAssembly(Operation *op, OpAsmPrinter *p) {
auto opPointer = dyn_cast<ConcreteType>(op);
auto opPointer = op->dyn_cast<ConcreteType>();
assert(opPointer &&
"op's name does not match name of concrete type instantiated with");
opPointer.print(p);
@ -825,13 +825,11 @@ public:
/// This is a public constructor. Any op can be initialized to null.
explicit Op() : OpState(nullptr) {}
Op(std::nullptr_t) : OpState(nullptr) {}
/// This is a public constructor to enable access via the llvm::cast family of
/// methods. This should not be used directly.
explicit Op(Operation *state) : OpState(state) {
assert(!state || isa<ConcreteOpType>(state));
}
protected:
/// This is a private constructor only accessible through the
/// Operation::cast family of methods.
explicit Op(Operation *state) : OpState(state) {}
friend class Operation;
private:


@ -389,6 +389,14 @@ public:
// Conversions to declared operations like DimOp
//===--------------------------------------------------------------------===//
/// The dyn_cast methods perform a dynamic cast from an Operation to a typed
/// Op like DimOp. This returns a null Op on failure.
template <typename OpClass> OpClass dyn_cast() {
if (isa<OpClass>())
return cast<OpClass>();
return OpClass();
}
/// The cast methods perform a cast from an Operation to a typed Op like
/// DimOp. This aborts if the parameter to the template isn't an instance of
/// the template type argument.
@ -409,10 +417,10 @@ public:
/// including this one.
void walk(const std::function<void(Operation *)> &callback);
/// Specialization of walk to only visit operations of 'T'.
template <typename T> void walk(std::function<void(T)> callback) {
/// Specialization of walk to only visit operations of 'OpTy'.
template <typename OpTy> void walk(std::function<void(OpTy)> callback) {
walk([&](Operation *op) {
if (auto derivedOp = dyn_cast<T>(op))
if (auto derivedOp = op->dyn_cast<OpTy>())
callback(derivedOp);
});
}
@ -526,6 +534,17 @@ inline auto Operation::getOperands() -> operand_range {
return {operand_begin(), operand_end()};
}
/// Provide dyn_cast_or_null functionality for Operation casts.
template <typename T> T dyn_cast_or_null(Operation *op) {
return op ? op->dyn_cast<T>() : T();
}
/// Provide isa_and_nonnull functionality for Operation casts, i.e. if the
/// operation is non-null and a class of 'T'.
template <typename T> bool isa_and_nonnull(Operation *op) {
return op && op->isa<T>();
}
/// This class implements the result iterators for the Operation class
/// in terms of getResult(idx).
class ResultIterator final
@ -579,30 +598,4 @@ inline auto Operation::getResultTypes()
} // end namespace mlir
namespace llvm {
/// Provide isa functionality for operation casts.
template <typename T> struct isa_impl<T, ::mlir::Operation> {
static inline bool doit(const ::mlir::Operation &op) {
return T::classof(const_cast<::mlir::Operation *>(&op));
}
};
/// Provide specializations for operation casts as the resulting T is value
/// typed.
template <typename T> struct cast_retty_impl<T, ::mlir::Operation *> {
using ret_type = T;
};
template <typename T> struct cast_retty_impl<T, ::mlir::Operation> {
using ret_type = T;
};
template <class T>
struct cast_convert_val<T, ::mlir::Operation, ::mlir::Operation> {
static T doit(::mlir::Operation &val) { return T(&val); }
};
template <class T>
struct cast_convert_val<T, ::mlir::Operation *, ::mlir::Operation *> {
static T doit(::mlir::Operation *val) { return T(val); }
};
} // end namespace llvm
#endif // MLIR_IR_OPERATION_H

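Taken together, the Operation.h hunks above restore the member cast API (isa / cast / dyn_cast), add back the mlir::dyn_cast_or_null and mlir::isa_and_nonnull helpers, and make the typed Op(Operation *) constructor protected again, so typed ops are obtained through that API rather than constructed directly. A small sketch of how calling code uses the restored helpers, with DimOp borrowed from the comment in the hunk and a possibly-null operation to motivate the *_or_null / *_and_nonnull forms:

#include "mlir/IR/Operation.h"
#include "mlir/StandardOps/Ops.h"  // assumed location of DimOp at this revision

// Sketch only; `op` may be null.
void inspect(mlir::Operation *op) {
  // isa_and_nonnull guards both against null and against the wrong kind,
  // so the asserting cast below cannot fire.
  if (mlir::isa_and_nonnull<mlir::DimOp>(op)) {
    mlir::DimOp dim = op->cast<mlir::DimOp>();
    (void)dim;
  }
  // dyn_cast_or_null folds the null check and the failed cast into one test.
  if (mlir::DimOp dim = mlir::dyn_cast_or_null<mlir::DimOp>(op))
    (void)dim;
}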

@ -215,7 +215,7 @@ public:
OperationState state(getContext(), location, OpTy::getOperationName());
OpTy::build(this, &state, args...);
auto *op = createOperation(state);
auto result = dyn_cast<OpTy>(op);
auto result = op->dyn_cast<OpTy>();
assert(result && "Builder didn't return the right type");
return result;
}
@ -231,7 +231,7 @@ public:
// If the Operation we produce is valid, return it.
if (!OpTy::verifyInvariants(op)) {
auto result = dyn_cast<OpTy>(op);
auto result = op->dyn_cast<OpTy>();
assert(result && "Builder didn't return the right type");
return result;
}


@ -69,7 +69,6 @@ using llvm::cast_or_null;
using llvm::dyn_cast;
using llvm::dyn_cast_or_null;
using llvm::isa;
using llvm::isa_and_nonnull;
// Containers.
using llvm::ArrayRef;


@ -61,11 +61,11 @@ bool mlir::isValidDim(Value *value) {
if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
return true;
// Affine apply operation is ok if all of its operands are ok.
if (auto applyOp = dyn_cast<AffineApplyOp>(op))
if (auto applyOp = op->dyn_cast<AffineApplyOp>())
return applyOp.isValidDim();
// The dim op is okay if its operand memref/tensor is defined at the top
// level.
if (auto dimOp = dyn_cast<DimOp>(op))
if (auto dimOp = op->dyn_cast<DimOp>())
return isTopLevelSymbol(dimOp.getOperand());
return false;
}
@ -86,11 +86,11 @@ bool mlir::isValidSymbol(Value *value) {
if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
return true;
// Affine apply operation is ok if all of its operands are ok.
if (auto applyOp = dyn_cast<AffineApplyOp>(op))
if (auto applyOp = op->dyn_cast<AffineApplyOp>())
return applyOp.isValidSymbol();
// The dim op is okay if its operand memref/tensor is defined at the top
// level.
if (auto dimOp = dyn_cast<DimOp>(op))
if (auto dimOp = op->dyn_cast<DimOp>())
return isTopLevelSymbol(dimOp.getOperand());
return false;
}


@ -320,8 +320,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
loadAndStores.match(forOp, &loadAndStoresMatched);
for (auto ls : loadAndStoresMatched) {
auto *op = ls.getMatchedOperation();
auto load = dyn_cast<LoadOp>(op);
auto store = dyn_cast<StoreOp>(op);
auto load = op->dyn_cast<LoadOp>();
auto store = op->dyn_cast<StoreOp>();
// Only scalar types are considered vectorizable, all load/store must be
// vectorizable for a loop to qualify as vectorizable.
// TODO(ntv): ponder whether we want to be more general here.
@ -338,8 +338,8 @@ isVectorizableLoopBodyWithOpCond(AffineForOp loop,
bool mlir::isVectorizableLoopBody(AffineForOp loop, int *memRefDim) {
VectorizableOpFun fun([memRefDim](AffineForOp loop, Operation &op) {
auto load = dyn_cast<LoadOp>(op);
auto store = dyn_cast<StoreOp>(op);
auto load = op.dyn_cast<LoadOp>();
auto store = op.dyn_cast<StoreOp>();
return load ? isContiguousAccess(loop.getInductionVar(), load, memRefDim)
: isContiguousAccess(loop.getInductionVar(), store, memRefDim);
});


@ -48,9 +48,9 @@ FunctionPassBase *mlir::createMemRefBoundCheckPass() {
void MemRefBoundCheck::runOnFunction() {
getFunction().walk([](Operation *opInst) {
if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
boundCheckLoadOrStoreOp(loadOp);
} else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
} else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
boundCheckLoadOrStoreOp(storeOp);
}
// TODO(bondhugula): do this for DMA ops as well.


@ -50,7 +50,7 @@ static void getForwardSliceImpl(Operation *op,
return;
}
if (auto forOp = dyn_cast<AffineForOp>(op)) {
if (auto forOp = op->dyn_cast<AffineForOp>()) {
for (auto &u : forOp.getInductionVar()->getUses()) {
auto *ownerInst = u.getOwner();
if (forwardSlice->count(ownerInst) == 0) {


@ -44,7 +44,7 @@ void mlir::getLoopIVs(Operation &op, SmallVectorImpl<AffineForOp> *loops) {
AffineForOp currAffineForOp;
// Traverse up the hierarchy collecting all 'affine.for' operations while
// skipping over 'affine.if' operations.
while (currOp && ((currAffineForOp = dyn_cast<AffineForOp>(currOp)) ||
while (currOp && ((currAffineForOp = currOp->dyn_cast<AffineForOp>()) ||
currOp->isa<AffineIfOp>())) {
if (currAffineForOp)
loops->push_back(currAffineForOp);
@ -239,7 +239,7 @@ LogicalResult MemRefRegion::compute(Operation *op, unsigned loopDepth,
assert(isValidSymbol(symbol));
// Check if the symbol is a constant.
if (auto *op = symbol->getDefiningOp()) {
if (auto constOp = dyn_cast<ConstantIndexOp>(op)) {
if (auto constOp = op->dyn_cast<ConstantIndexOp>()) {
cst.setIdToConstant(*symbol, constOp.getValue());
}
}
@ -467,7 +467,7 @@ static Operation *getInstAtPosition(ArrayRef<unsigned> positions,
}
if (level == positions.size() - 1)
return &op;
if (auto childAffineForOp = dyn_cast<AffineForOp>(op))
if (auto childAffineForOp = op.dyn_cast<AffineForOp>())
return getInstAtPosition(positions, level + 1,
childAffineForOp.getBody());
@ -633,7 +633,7 @@ mlir::insertBackwardComputationSlice(Operation *srcOpInst, Operation *dstOpInst,
// Constructs MemRefAccess populating it with the memref, its indices and
// opinst from 'loadOrStoreOpInst'.
MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
if (auto loadOp = dyn_cast<LoadOp>(loadOrStoreOpInst)) {
if (auto loadOp = loadOrStoreOpInst->dyn_cast<LoadOp>()) {
memref = loadOp.getMemRef();
opInst = loadOrStoreOpInst;
auto loadMemrefType = loadOp.getMemRefType();
@ -643,7 +643,7 @@ MemRefAccess::MemRefAccess(Operation *loadOrStoreOpInst) {
}
} else {
assert(loadOrStoreOpInst->isa<StoreOp>() && "load/store op expected");
auto storeOp = dyn_cast<StoreOp>(loadOrStoreOpInst);
auto storeOp = loadOrStoreOpInst->dyn_cast<StoreOp>();
opInst = loadOrStoreOpInst;
memref = storeOp.getMemRef();
auto storeMemrefType = storeOp.getMemRefType();
@ -750,7 +750,7 @@ Optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
void mlir::getSequentialLoops(
AffineForOp forOp, llvm::SmallDenseSet<Value *, 8> *sequentialLoops) {
forOp.getOperation()->walk([&](Operation *op) {
if (auto innerFor = dyn_cast<AffineForOp>(op))
if (auto innerFor = op->dyn_cast<AffineForOp>())
if (!isLoopParallel(innerFor))
sequentialLoops->insert(innerFor.getInductionVar());
});


@ -152,7 +152,7 @@ static SetVector<Operation *> getParentsOfType(Operation *op) {
SetVector<Operation *> res;
auto *current = op;
while (auto *parent = current->getParentOp()) {
if (auto typedParent = dyn_cast<T>(parent)) {
if (auto typedParent = parent->template dyn_cast<T>()) {
assert(res.count(parent) == 0 && "Already inserted");
res.insert(parent);
}
@ -177,7 +177,7 @@ AffineMap mlir::makePermutationMap(
}
}
if (auto load = dyn_cast<LoadOp>(op)) {
if (auto load = op->dyn_cast<LoadOp>()) {
return ::makePermutationMap(load.getIndices(), enclosingLoopToVectorDim);
}
@ -198,10 +198,10 @@ bool mlir::matcher::operatesOnSuperVectorsOf(Operation &op,
/// do not have to special case. Maybe a trait, or just a method, unclear atm.
bool mustDivide = false;
VectorType superVectorType;
if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
if (auto read = op.dyn_cast<VectorTransferReadOp>()) {
superVectorType = read.getResultType();
mustDivide = true;
} else if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
} else if (auto write = op.dyn_cast<VectorTransferWriteOp>()) {
superVectorType = write.getVectorType();
mustDivide = true;
} else if (op.getNumResults() == 0) {


@ -100,7 +100,7 @@ ValueHandle ValueHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
if (op->getNumResults() == 1) {
return ValueHandle(op->getResult(0));
}
if (auto f = dyn_cast<AffineForOp>(op)) {
if (auto f = op->dyn_cast<AffineForOp>()) {
return ValueHandle(f.getInductionVar());
}
llvm_unreachable("unsupported operation, use an OperationHandle instead");
@ -147,8 +147,8 @@ static llvm::Optional<ValueHandle> emitStaticFor(ArrayRef<ValueHandle> lbs,
if (!lbDef || !ubDef)
return llvm::Optional<ValueHandle>();
auto lbConst = dyn_cast<ConstantIndexOp>(lbDef);
auto ubConst = dyn_cast<ConstantIndexOp>(ubDef);
auto lbConst = lbDef->dyn_cast<ConstantIndexOp>();
auto ubConst = ubDef->dyn_cast<ConstantIndexOp>();
if (!lbConst || !ubConst)
return llvm::Optional<ValueHandle>();


@ -319,11 +319,11 @@ static LogicalResult tileLinalgOp(LinalgOp &op, ArrayRef<int64_t> tileSizes,
// TODO(ntv) expose as a primitive for other passes.
static LogicalResult tileLinalgOp(Operation *op, ArrayRef<int64_t> tileSizes,
PerFunctionState &state) {
if (auto matmulOp = dyn_cast<MatmulOp>(op)) {
if (auto matmulOp = op->dyn_cast<MatmulOp>()) {
return tileLinalgOp(matmulOp, tileSizes, state);
} else if (auto matvecOp = dyn_cast<MatvecOp>(op)) {
} else if (auto matvecOp = op->dyn_cast<MatvecOp>()) {
return tileLinalgOp(matvecOp, tileSizes, state);
} else if (auto dotOp = dyn_cast<DotOp>(op)) {
} else if (auto dotOp = op->dyn_cast<DotOp>()) {
return tileLinalgOp(dotOp, tileSizes, state);
}
return failure();


@ -68,9 +68,9 @@ ValueHandle LoopNestRangeBuilder::LoopNestRangeBuilder::operator()(
SmallVector<Value *, 8> mlir::getRanges(Operation *op) {
SmallVector<Value *, 8> res;
if (auto view = dyn_cast<ViewOp>(op)) {
if (auto view = op->dyn_cast<ViewOp>()) {
res.append(view.getIndexings().begin(), view.getIndexings().end());
} else if (auto slice = dyn_cast<SliceOp>(op)) {
} else if (auto slice = op->dyn_cast<SliceOp>()) {
for (auto *i : slice.getIndexings())
if (i->getType().isa<RangeType>())
res.push_back(i);
@ -100,7 +100,7 @@ SmallVector<Value *, 8> mlir::getRanges(Operation *op) {
Value *mlir::createOrReturnView(FuncBuilder *b, Location loc,
Operation *viewDefiningOp,
ArrayRef<Value *> ranges) {
if (auto view = dyn_cast<ViewOp>(viewDefiningOp)) {
if (auto view = viewDefiningOp->dyn_cast<ViewOp>()) {
auto indexings = view.getIndexings();
if (std::equal(indexings.begin(), indexings.end(), ranges.begin()))
return view.getResult();


@ -134,7 +134,7 @@ struct MemRefCastFolder : public RewritePattern {
void rewrite(Operation *op, PatternRewriter &rewriter) const override {
for (unsigned i = 0, e = op->getNumOperands(); i != e; ++i)
if (auto *memref = op->getOperand(i)->getDefiningOp())
if (auto cast = dyn_cast<MemRefCastOp>(memref))
if (auto cast = memref->dyn_cast<MemRefCastOp>())
op->setOperand(i, cast.getOperand());
rewriter.updatedRootInPlace(op);
}


@ -199,11 +199,11 @@ bool ModuleTranslation::convertOperation(Operation &opInst,
// Emit branches. We need to look up the remapped blocks and ignore the block
// arguments that were transformed into PHI nodes.
if (auto brOp = dyn_cast<LLVM::BrOp>(opInst)) {
if (auto brOp = opInst.dyn_cast<LLVM::BrOp>()) {
builder.CreateBr(blockMapping[brOp.getSuccessor(0)]);
return false;
}
if (auto condbrOp = dyn_cast<LLVM::CondBrOp>(opInst)) {
if (auto condbrOp = opInst.dyn_cast<LLVM::CondBrOp>()) {
builder.CreateCondBr(valueMapping.lookup(condbrOp.getOperand(0)),
blockMapping[condbrOp.getSuccessor(0)],
blockMapping[condbrOp.getSuccessor(1)]);
@ -264,7 +264,7 @@ static Value *getPHISourceValue(Block *current, Block *pred,
// For conditional branches, we need to check if the current block is reached
// through the "true" or the "false" branch and take the relevant operands.
auto condBranchOp = dyn_cast<LLVM::CondBrOp>(terminator);
auto condBranchOp = terminator.dyn_cast<LLVM::CondBrOp>();
assert(condBranchOp &&
"only branch operations can be terminators of a block that "
"has successors");


@ -173,11 +173,11 @@ static void getMultiLevelStrides(const MemRefRegion &region,
static bool getFullMemRefAsRegion(Operation *opInst, unsigned numParamLoopIVs,
MemRefRegion *region) {
unsigned rank;
if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
rank = loadOp.getMemRefType().getRank();
region->memref = loadOp.getMemRef();
region->setWrite(false);
} else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
} else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
rank = storeOp.getMemRefType().getRank();
region->memref = storeOp.getMemRef();
region->setWrite(true);
@ -483,7 +483,7 @@ bool DmaGeneration::runOnBlock(Block *block) {
});
for (auto it = curBegin; it != block->end(); ++it) {
if (auto forOp = dyn_cast<AffineForOp>(&*it)) {
if (auto forOp = it->dyn_cast<AffineForOp>()) {
// Returns true if the footprint is known to exceed capacity.
auto exceedsCapacity = [&](AffineForOp forOp) {
Optional<int64_t> footprint =
@ -607,10 +607,10 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
// Walk this range of operations to gather all memory regions.
block->walk(begin, end, [&](Operation *opInst) {
// Gather regions to allocate to buffers in faster memory space.
if (auto loadOp = dyn_cast<LoadOp>(opInst)) {
if (auto loadOp = opInst->dyn_cast<LoadOp>()) {
if (loadOp.getMemRefType().getMemorySpace() != slowMemorySpace)
return;
} else if (auto storeOp = dyn_cast<StoreOp>(opInst)) {
} else if (auto storeOp = opInst->dyn_cast<StoreOp>()) {
if (storeOp.getMemRefType().getMemorySpace() != slowMemorySpace)
return;
} else {
@ -739,7 +739,7 @@ uint64_t DmaGeneration::runOnBlock(Block::iterator begin, Block::iterator end) {
// For a range of operations, a note will be emitted at the caller.
AffineForOp forOp;
uint64_t sizeInKib = llvm::divideCeil(totalDmaBuffersSizeInBytes, 1024);
if (llvm::DebugFlag && (forOp = dyn_cast<AffineForOp>(&*begin))) {
if (llvm::DebugFlag && (forOp = begin->dyn_cast<AffineForOp>())) {
forOp.emitRemark()
<< sizeInKib
<< " KiB of DMA buffers in fast memory space for this block\n";


@ -644,7 +644,7 @@ bool MemRefDependenceGraph::init(Function &f) {
DenseMap<Operation *, unsigned> forToNodeMap;
for (auto &op : f.front()) {
if (auto forOp = dyn_cast<AffineForOp>(op)) {
if (auto forOp = op.dyn_cast<AffineForOp>()) {
// Create graph node 'id' to represent top-level 'forOp' and record
// all loads and store accesses it contains.
LoopNestStateCollector collector;
@ -666,14 +666,14 @@ bool MemRefDependenceGraph::init(Function &f) {
}
forToNodeMap[&op] = node.id;
nodes.insert({node.id, node});
} else if (auto loadOp = dyn_cast<LoadOp>(op)) {
} else if (auto loadOp = op.dyn_cast<LoadOp>()) {
// Create graph node for top-level load op.
Node node(nextNodeId++, &op);
node.loads.push_back(&op);
auto *memref = op.cast<LoadOp>().getMemRef();
memrefAccesses[memref].insert(node.id);
nodes.insert({node.id, node});
} else if (auto storeOp = dyn_cast<StoreOp>(op)) {
} else if (auto storeOp = op.dyn_cast<StoreOp>()) {
// Create graph node for top-level store op.
Node node(nextNodeId++, &op);
node.stores.push_back(&op);
@ -2125,7 +2125,7 @@ public:
auto *fn = dstNode->op->getFunction();
for (unsigned i = 0, e = fn->getNumArguments(); i != e; ++i) {
for (auto &use : fn->getArgument(i)->getUses()) {
if (auto loadOp = dyn_cast<LoadOp>(use.getOwner())) {
if (auto loadOp = use.getOwner()->dyn_cast<LoadOp>()) {
// Gather loops surrounding 'use'.
SmallVector<AffineForOp, 4> loops;
getLoopIVs(*use.getOwner(), &loops);


@ -273,7 +273,7 @@ static void getTileableBands(Function &f,
for (auto &block : f)
for (auto &op : block)
if (auto forOp = dyn_cast<AffineForOp>(op))
if (auto forOp = op.dyn_cast<AffineForOp>())
getMaximalPerfectLoopNest(forOp);
}


@ -92,7 +92,7 @@ void LoopUnrollAndJam::runOnFunction() {
// unroll-and-jammed by this pass. However, runOnAffineForOp can be called on
// any for operation.
auto &entryBlock = getFunction().front();
if (auto forOp = dyn_cast<AffineForOp>(entryBlock.front()))
if (auto forOp = entryBlock.front().dyn_cast<AffineForOp>())
runOnAffineForOp(forOp);
}


@ -620,10 +620,10 @@ void LowerAffinePass::runOnFunction() {
// Rewrite all of the ifs and fors. We walked the operations in post order,
// so we know that we will rewrite them in the reverse order.
for (auto *op : llvm::reverse(instsToRewrite)) {
if (auto ifOp = dyn_cast<AffineIfOp>(op)) {
if (auto ifOp = op->dyn_cast<AffineIfOp>()) {
if (lowerAffineIf(ifOp))
return signalPassFailure();
} else if (auto forOp = dyn_cast<AffineForOp>(op)) {
} else if (auto forOp = op->dyn_cast<AffineForOp>()) {
if (lowerAffineFor(forOp))
return signalPassFailure();
} else if (lowerAffineApply(op->cast<AffineApplyOp>())) {


@ -556,12 +556,12 @@ static bool instantiateMaterialization(Operation *op,
if (op->getNumRegions() != 0)
return op->emitError("NYI path Op with region"), true;
if (auto write = dyn_cast<VectorTransferWriteOp>(op)) {
if (auto write = op->dyn_cast<VectorTransferWriteOp>()) {
auto *clone = instantiate(&b, write, state->hwVectorType,
state->hwVectorInstance, state->substitutionsMap);
return clone == nullptr;
}
if (auto read = dyn_cast<VectorTransferReadOp>(op)) {
if (auto read = op->dyn_cast<VectorTransferReadOp>()) {
auto *clone = instantiate(&b, read, state->hwVectorType,
state->hwVectorInstance, state->substitutionsMap);
if (!clone) {


@ -103,7 +103,7 @@ void MemRefDataFlowOpt::forwardStoreToLoad(LoadOp loadOp) {
SmallVector<Operation *, 8> storeOps;
unsigned minSurroundingLoops = getNestingDepth(*loadOpInst);
for (auto &use : loadOp.getMemRef()->getUses()) {
auto storeOp = dyn_cast<StoreOp>(use.getOwner());
auto storeOp = use.getOwner()->dyn_cast<StoreOp>();
if (!storeOp)
continue;
auto *storeOpInst = storeOp.getOperation();


@ -181,7 +181,7 @@ static void findMatchingStartFinishInsts(
// Collect outgoing DMA operations - needed to check for dependences below.
SmallVector<DmaStartOp, 4> outgoingDmaOps;
for (auto &op : *forOp.getBody()) {
auto dmaStartOp = dyn_cast<DmaStartOp>(op);
auto dmaStartOp = op.dyn_cast<DmaStartOp>();
if (dmaStartOp && dmaStartOp.isSrcMemorySpaceFaster())
outgoingDmaOps.push_back(dmaStartOp);
}
@ -193,7 +193,7 @@ static void findMatchingStartFinishInsts(
dmaFinishInsts.push_back(&op);
continue;
}
auto dmaStartOp = dyn_cast<DmaStartOp>(op);
auto dmaStartOp = op.dyn_cast<DmaStartOp>();
if (!dmaStartOp)
continue;


@ -48,7 +48,7 @@ void TestConstantFold::foldOperation(Operation *op,
}
// If this op is a constant that is used and cannot be de-duplicated,
// remember it for cleanup later.
else if (auto constant = dyn_cast<ConstantOp>(op)) {
else if (auto constant = op->dyn_cast<ConstantOp>()) {
existingConstants.push_back(op);
}
}


@ -40,7 +40,7 @@ bool ConstantFoldHelper::tryToConstantFold(
// into the value it contains. We need to consider constants before the
// constant folding logic to avoid re-creating the same constant later.
// TODO: Extend to support dialect-specific constant ops.
if (auto constant = dyn_cast<ConstantOp>(op)) {
if (auto constant = op->dyn_cast<ConstantOp>()) {
// If this constant is dead, update bookkeeping and signal the caller.
if (constant.use_empty()) {
notifyRemoval(op);


@ -363,7 +363,7 @@ void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
nestedLoops.push_back(curr);
auto *currBody = curr.getBody();
while (currBody->begin() == std::prev(currBody->end(), 2) &&
(curr = dyn_cast<AffineForOp>(curr.getBody()->front()))) {
(curr = curr.getBody()->front().dyn_cast<AffineForOp>())) {
nestedLoops.push_back(curr);
currBody = curr.getBody();
}


@ -234,7 +234,7 @@ void VectorizerTestPass::testComposeMaps(llvm::raw_ostream &outs) {
static bool affineApplyOp(Operation &op) { return op.isa<AffineApplyOp>(); }
static bool singleResultAffineApplyOpWithoutUses(Operation &op) {
auto app = dyn_cast<AffineApplyOp>(op);
auto app = op.dyn_cast<AffineApplyOp>();
return app && app.use_empty();
}


@ -839,8 +839,8 @@ static LogicalResult vectorizeAffineForOp(AffineForOp loop, int64_t step,
loadAndStores.match(loop.getOperation(), &loadAndStoresMatches);
for (auto ls : loadAndStoresMatches) {
auto *opInst = ls.getMatchedOperation();
auto load = dyn_cast<LoadOp>(opInst);
auto store = dyn_cast<StoreOp>(opInst);
auto load = opInst->dyn_cast<LoadOp>();
auto store = opInst->dyn_cast<StoreOp>();
LLVM_DEBUG(opInst->print(dbgs()));
LogicalResult result =
load ? vectorizeRootOrTerminal(loop.getInductionVar(), load, state)
@ -982,7 +982,7 @@ static Value *vectorizeOperand(Value *operand, Operation *op,
return nullptr;
}
// 3. vectorize constant.
if (auto constant = dyn_cast<ConstantOp>(operand->getDefiningOp())) {
if (auto constant = operand->getDefiningOp()->dyn_cast<ConstantOp>()) {
return vectorizeConstant(
op, constant,
VectorType::get(state->strategy->vectorSizes, operand->getType()));
@ -1012,7 +1012,7 @@ static Operation *vectorizeOneOperation(Operation *opInst,
assert(!opInst->isa<VectorTransferWriteOp>() &&
"vector.transfer_write cannot be further vectorized");
if (auto store = dyn_cast<StoreOp>(opInst)) {
if (auto store = opInst->dyn_cast<StoreOp>()) {
auto *memRef = store.getMemRef();
auto *value = store.getValueToStore();
auto *vectorValue = vectorizeOperand(value, opInst, state);


@ -161,8 +161,8 @@ static bool emitOneBuilder(const Record &record, raw_ostream &os) {
}
// Output the check and the rewritten builder string.
os << "if (auto op = dyn_cast<" << op.getQualCppClassName()
<< ">(opInst)) {\n";
os << "if (auto op = opInst.dyn_cast<" << op.getQualCppClassName()
<< ">()) {\n";
os << bs.str() << builderStrRef << "\n";
os << " return false;\n";
os << "}\n";
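For reference, the stream statements in this last hunk now emit a member-style cast in each generated builder check; per TableGen record the generated text looks roughly like the following fragment (SomeDialect::SomeOp is an illustrative class name, and the body is whatever rewritten builder string the record carries):

if (auto op = opInst.dyn_cast<SomeDialect::SomeOp>()) {
  // ... rewritten builder string from the record ...
  return false;
}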