Replace remaining usages of the Instruction class with Operation.

PiperOrigin-RevId: 240777521
River Riddle 2019-03-28 08:24:38 -07:00 committed by jpienaar
parent 31442a66ef
commit af9760fe18
40 changed files with 335 additions and 347 deletions

View File

@ -16,7 +16,7 @@
// =============================================================================
//
// This file defines convenience types for working with Affine operations
// in the MLIR instruction set.
// in the MLIR operation set.
//
//===----------------------------------------------------------------------===//
@ -242,13 +242,13 @@ AffineForOp getForInductionVarOwner(Value *val);
void extractForInductionVars(ArrayRef<AffineForOp> forInsts,
SmallVectorImpl<Value *> *ivs);
/// AffineBound represents a lower or upper bound in the for instruction.
/// AffineBound represents a lower or upper bound in the for operation.
/// This class does not own the underlying operands. Instead, it refers
/// to the operands stored in the AffineForOp. Its life span should not exceed
/// that of the for instruction it refers to.
/// that of the for operation it refers to.
class AffineBound {
public:
AffineForOp getAffineForOp() { return inst; }
AffineForOp getAffineForOp() { return op; }
AffineMap getMap() { return map; }
/// Returns an AffineValueMap representing this bound.
@ -256,27 +256,27 @@ public:
unsigned getNumOperands() { return opEnd - opStart; }
Value *getOperand(unsigned idx) {
return inst.getOperation()->getOperand(opStart + idx);
return op.getOperation()->getOperand(opStart + idx);
}
using operand_iterator = AffineForOp::operand_iterator;
using operand_range = AffineForOp::operand_range;
operand_iterator operand_begin() { return inst.operand_begin() + opStart; }
operand_iterator operand_end() { return inst.operand_begin() + opEnd; }
operand_iterator operand_begin() { return op.operand_begin() + opStart; }
operand_iterator operand_end() { return op.operand_begin() + opEnd; }
operand_range getOperands() { return {operand_begin(), operand_end()}; }
private:
// 'affine.for' instruction that contains this bound.
AffineForOp inst;
// 'affine.for' operation that contains this bound.
AffineForOp op;
// Start and end positions of this affine bound operands in the list of
// the containing 'affine.for' instruction operands.
// the containing 'affine.for' operation operands.
unsigned opStart, opEnd;
// Affine map for this bound.
AffineMap map;
AffineBound(AffineForOp inst, unsigned opStart, unsigned opEnd, AffineMap map)
: inst(inst), opStart(opStart), opEnd(opEnd), map(map) {}
AffineBound(AffineForOp op, unsigned opStart, unsigned opEnd, AffineMap map)
: op(op), opStart(opStart), opEnd(opEnd), map(map) {}
friend class AffineForOp;
};
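
As a rough illustration of the renamed members, here is a minimal sketch of inspecting a loop bound through AffineBound. It assumes the AffineOps header path of this era and that AffineForOp hands out its bounds through a getLowerBound()-style accessor, which is not shown in this hunk.

#include "mlir/AffineOps/AffineOps.h"   // header path assumed for this era
#include "llvm/Support/raw_ostream.h"

// Sketch only: walks the operands feeding a loop's lower bound.
static void inspectLowerBound(mlir::AffineForOp forOp) {
  mlir::AffineBound lb = forOp.getLowerBound();   // accessor assumed
  llvm::errs() << "lower bound: " << lb.getNumOperands() << " operands, map with "
               << lb.getMap().getNumResults() << " results\n";
  for (mlir::Value *operand : lb.getOperands())
    if (auto *def = operand->getDefiningOp())
      def->print(llvm::errs());                   // print the defining operation, if any
}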
@ -342,7 +342,7 @@ public:
static StringRef getOperationName() { return "affine.terminator"; }
private:
friend Instruction;
friend Operation;
};
/// Returns true if the given Value can be used as a dimension id.

View File

@ -100,7 +100,7 @@ bool isVectorizableLoopBody(AffineForOp loop);
bool isVectorizableLoopBodyAlongFastestVaryingMemRefDim(
AffineForOp loop, unsigned fastestVaryingDim);
/// Checks where SSA dominance would be violated if a for inst's body
/// Checks where SSA dominance would be violated if a for op's body
/// operations are shifted by the specified shifts. This method checks if a
/// 'def' and all its uses have the same shift factor.
// TODO(mlir-team): extend this to check for memory-based dependence

View File

@ -32,7 +32,7 @@ namespace OpTrait {
// corresponding trait classes. This avoids them being template
// instantiated/duplicated.
namespace impl {
bool verifyCompatibleOperandBroadcast(Instruction *op);
bool verifyCompatibleOperandBroadcast(Operation *op);
} // namespace impl
namespace util {
@ -78,7 +78,7 @@ template <typename ConcreteType>
class BroadcastableTwoOperandsOneResult
: public TraitBase<ConcreteType, BroadcastableTwoOperandsOneResult> {
public:
static bool verifyTrait(Instruction *op) {
static bool verifyTrait(Operation *op) {
return impl::verifyCompatibleOperandBroadcast(op);
}
};
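
For orientation, a hedged sketch of an op class attaching this trait; the Op<> base, the companion traits, and the constructor-inheriting boilerplate are assumed from MLIR's general op-definition machinery rather than taken from this hunk.

// Sketch of a hypothetical elementwise op opting into the broadcast check.
class BroadcastAddOp
    : public mlir::Op<BroadcastAddOp, mlir::OpTrait::NOperands<2>::Impl,
                      mlir::OpTrait::OneResult,
                      mlir::OpTrait::BroadcastableTwoOperandsOneResult> {
public:
  using Op::Op;   // assumed boilerplate
  static llvm::StringRef getOperationName() { return "test.broadcast_add"; }
  // The trait's verifyTrait() hook now calls
  // OpTrait::impl::verifyCompatibleOperandBroadcast(Operation *).
};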

View File

@ -60,7 +60,7 @@ public:
static Location getLocation();
private:
/// Only NestedBuilder (which is used to create an instruction with a body)
/// Only NestedBuilder (which is used to create an operation with a body)
/// may access private members in order to implement scoping.
friend class NestedBuilder;
@ -107,9 +107,9 @@ protected:
return *this;
}
/// Enter an mlir::Block and setup a ScopedContext to insert instructions at
/// Enter an mlir::Block and setup a ScopedContext to insert operations at
/// the end of it. Since we cannot use c++ language-level scoping to implement
/// scoping itself, we use enter/exit pairs of instructions.
/// scoping itself, we use enter/exit pairs of operations.
/// As a consequence we must allocate a new FuncBuilder + ScopedContext and
/// let the escape.
/// Step back "prev" times from the end of the block to set up the insertion
@ -142,7 +142,7 @@ private:
ScopedContext *bodyScope = nullptr;
};
/// A LoopBuilder is a generic NestedBuilder for loop-like MLIR instructions.
/// A LoopBuilder is a generic NestedBuilder for loop-like MLIR operations.
/// More specifically it is meant to be used as a temporary object for
/// representing any nested MLIR construct that is "related to" an mlir::Value*
/// (for now an induction variable).
@ -237,7 +237,7 @@ private:
BlockBuilder &operator=(BlockBuilder &other) = delete;
};
/// Base class for ValueHandle, InstructionHandle and BlockHandle.
/// Base class for ValueHandle, OperationHandle and BlockHandle.
/// Not meant to be used outside of these classes.
struct CapturableHandle {
protected:
@ -259,7 +259,7 @@ protected:
/// 3. constructed state,in which case it holds a Value.
///
/// A ValueHandle is meant to capture a single Value* and should be used for
/// instructions that have a single result. For convenience of use, we also
/// operations that have a single result. For convenience of use, we also
/// include AffineForOp in this category although it does not return a value.
/// In the case of AffineForOp, the captured Value* is the loop induction
/// variable.
@ -316,7 +316,7 @@ public:
static ValueHandle createComposedAffineApply(AffineMap map,
ArrayRef<Value *> operands);
/// Generic create for a named instruction producing a single value.
/// Generic create for a named operation producing a single value.
static ValueHandle create(StringRef name, ArrayRef<ValueHandle> operands,
ArrayRef<Type> resultTypes,
ArrayRef<NamedAttribute> attributes = {});
@ -329,7 +329,7 @@ public:
bool hasType() const { return t != Type(); }
Type getType() const { return t; }
Instruction *getOperation() const {
Operation *getOperation() const {
if (!v)
return nullptr;
return v->getDefiningOp();
@ -342,45 +342,44 @@ protected:
Value *v;
};
/// An InstructionHandle can be used in lieu of ValueHandle to capture the
/// instruction in cases when one does not care about, or cannot extract, a
/// unique Value* from the instruction.
/// This can be used for capturing zero result instructions as well as
/// multi-result instructions that are not supported by ValueHandle.
/// We do not distinguish further between zero and multi-result instructions at
/// An OperationHandle can be used in lieu of ValueHandle to capture the
/// operation in cases when one does not care about, or cannot extract, a
/// unique Value* from the operation.
/// This can be used for capturing zero result operations as well as
/// multi-result operations that are not supported by ValueHandle.
/// We do not distinguish further between zero and multi-result operations at
/// this time.
struct InstructionHandle : public CapturableHandle {
InstructionHandle() : inst(nullptr) {}
InstructionHandle(Instruction *inst) : inst(inst) {}
struct OperationHandle : public CapturableHandle {
OperationHandle() : op(nullptr) {}
OperationHandle(Operation *op) : op(op) {}
InstructionHandle(const InstructionHandle &) = default;
InstructionHandle &operator=(const InstructionHandle &) = default;
OperationHandle(const OperationHandle &) = default;
OperationHandle &operator=(const OperationHandle &) = default;
/// Generic mlir::Op create. This is the key to being extensible to the whole
/// of MLIR without duplicating the type system or the op definitions.
template <typename Op, typename... Args>
static InstructionHandle create(Args... args);
static OperationHandle create(Args... args);
/// Generic create for a named instruction.
static InstructionHandle create(StringRef name,
ArrayRef<ValueHandle> operands,
ArrayRef<Type> resultTypes,
ArrayRef<NamedAttribute> attributes = {});
/// Generic create for a named operation.
static OperationHandle create(StringRef name, ArrayRef<ValueHandle> operands,
ArrayRef<Type> resultTypes,
ArrayRef<NamedAttribute> attributes = {});
operator Instruction *() { return inst; }
Instruction *getOperation() const { return inst; }
operator Operation *() { return op; }
Operation *getOperation() const { return op; }
private:
Instruction *inst;
Operation *op;
};
/// Simple wrapper to build a generic instruction without successor blocks.
template <typename HandleType> struct CustomInstruction {
CustomInstruction(StringRef name) : name(name) {
/// Simple wrapper to build a generic operation without successor blocks.
template <typename HandleType> struct CustomOperation {
CustomOperation(StringRef name) : name(name) {
static_assert(std::is_same<HandleType, ValueHandle>() ||
std::is_same<HandleType, InstructionHandle>(),
"Only CustomInstruction<ValueHandle> or "
"CustomInstruction<InstructionHandle> can be constructed.");
std::is_same<HandleType, OperationHandle>(),
"Only CustomOperation<ValueHandle> or "
"CustomOperation<OperationHandle> can be constructed.");
}
HandleType operator()(ArrayRef<ValueHandle> operands = {},
ArrayRef<Type> resultTypes = {},
@ -426,26 +425,25 @@ private:
};
template <typename Op, typename... Args>
InstructionHandle InstructionHandle::create(Args... args) {
return InstructionHandle(
ScopedContext::getBuilder()
->create<Op>(ScopedContext::getLocation(), args...)
.getOperation());
OperationHandle OperationHandle::create(Args... args) {
return OperationHandle(ScopedContext::getBuilder()
->create<Op>(ScopedContext::getLocation(), args...)
.getOperation());
}
template <typename Op, typename... Args>
ValueHandle ValueHandle::create(Args... args) {
Instruction *inst = ScopedContext::getBuilder()
->create<Op>(ScopedContext::getLocation(), args...)
.getOperation();
if (inst->getNumResults() == 1) {
return ValueHandle(inst->getResult(0));
} else if (inst->getNumResults() == 0) {
if (auto f = inst->dyn_cast<AffineForOp>()) {
Operation *op = ScopedContext::getBuilder()
->create<Op>(ScopedContext::getLocation(), args...)
.getOperation();
if (op->getNumResults() == 1) {
return ValueHandle(op->getResult(0));
} else if (op->getNumResults() == 0) {
if (auto f = op->dyn_cast<AffineForOp>()) {
return ValueHandle(f.getInductionVar());
}
}
llvm_unreachable("unsupported instruction, use an InstructionHandle instead");
llvm_unreachable("unsupported operation, use an OperationHandle instead");
}
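
To show the renamed handles side by side, here is a hedged sketch that assumes an active ScopedContext (a FuncBuilder and Location installed elsewhere) and uses placeholder op names; only the create() entry points declared above are exercised.

#include "mlir/EDSC/Builders.h"   // header path assumed for this era

using namespace mlir;
using namespace mlir::edsc;

// Sketch only: a ScopedContext is assumed to be active on this thread.
static void buildByName(Type f32, ValueHandle lhs, ValueHandle rhs) {
  // Named creation of a single-result operation captures a ValueHandle.
  ValueHandle sum = ValueHandle::create("foo.add", {lhs, rhs}, {f32});

  // When there is no single result to capture, use an OperationHandle.
  OperationHandle written =
      OperationHandle::create("foo.write", {sum, lhs}, /*resultTypes=*/{});

  // CustomOperation wraps the same two entry points behind a callable.
  CustomOperation<ValueHandle> mul("foo.mul");
  ValueHandle prod = mul({sum, rhs}, {f32});
  (void)written;
  (void)prod;
}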
namespace op {

View File

@ -142,12 +142,12 @@ template <typename Load, typename Store> struct TemplatedIndexedValue {
/// Emits a `store`.
// NOLINTNEXTLINE: unconventional-assign-operator
InstructionHandle operator=(const TemplatedIndexedValue &rhs) {
OperationHandle operator=(const TemplatedIndexedValue &rhs) {
ValueHandle rrhs(rhs);
return Store(rrhs, getBase(), {indices.begin(), indices.end()});
}
// NOLINTNEXTLINE: unconventional-assign-operator
InstructionHandle operator=(ValueHandle rhs) {
OperationHandle operator=(ValueHandle rhs) {
return Store(rhs, getBase(), {indices.begin(), indices.end()});
}
@ -168,10 +168,10 @@ template <typename Load, typename Store> struct TemplatedIndexedValue {
ValueHandle operator-(ValueHandle e);
ValueHandle operator*(ValueHandle e);
ValueHandle operator/(ValueHandle e);
InstructionHandle operator+=(ValueHandle e);
InstructionHandle operator-=(ValueHandle e);
InstructionHandle operator*=(ValueHandle e);
InstructionHandle operator/=(ValueHandle e);
OperationHandle operator+=(ValueHandle e);
OperationHandle operator-=(ValueHandle e);
OperationHandle operator*=(ValueHandle e);
OperationHandle operator/=(ValueHandle e);
ValueHandle operator+(TemplatedIndexedValue e) {
return *this + static_cast<ValueHandle>(e);
}
@ -184,16 +184,16 @@ template <typename Load, typename Store> struct TemplatedIndexedValue {
ValueHandle operator/(TemplatedIndexedValue e) {
return *this / static_cast<ValueHandle>(e);
}
InstructionHandle operator+=(TemplatedIndexedValue e) {
OperationHandle operator+=(TemplatedIndexedValue e) {
return this->operator+=(static_cast<ValueHandle>(e));
}
InstructionHandle operator-=(TemplatedIndexedValue e) {
OperationHandle operator-=(TemplatedIndexedValue e) {
return this->operator-=(static_cast<ValueHandle>(e));
}
InstructionHandle operator*=(TemplatedIndexedValue e) {
OperationHandle operator*=(TemplatedIndexedValue e) {
return this->operator*=(static_cast<ValueHandle>(e));
}
InstructionHandle operator/=(TemplatedIndexedValue e) {
OperationHandle operator/=(TemplatedIndexedValue e) {
return this->operator/=(static_cast<ValueHandle>(e));
}
@ -236,26 +236,22 @@ ValueHandle TemplatedIndexedValue<Load, Store>::operator/(ValueHandle e) {
}
template <typename Load, typename Store>
InstructionHandle
TemplatedIndexedValue<Load, Store>::operator+=(ValueHandle e) {
OperationHandle TemplatedIndexedValue<Load, Store>::operator+=(ValueHandle e) {
using op::operator+;
return Store(*this + e, getBase(), {indices.begin(), indices.end()});
}
template <typename Load, typename Store>
InstructionHandle
TemplatedIndexedValue<Load, Store>::operator-=(ValueHandle e) {
OperationHandle TemplatedIndexedValue<Load, Store>::operator-=(ValueHandle e) {
using op::operator-;
return Store(*this - e, getBase(), {indices.begin(), indices.end()});
}
template <typename Load, typename Store>
InstructionHandle
TemplatedIndexedValue<Load, Store>::operator*=(ValueHandle e) {
OperationHandle TemplatedIndexedValue<Load, Store>::operator*=(ValueHandle e) {
using op::operator*;
return Store(*this * e, getBase(), {indices.begin(), indices.end()});
}
template <typename Load, typename Store>
InstructionHandle
TemplatedIndexedValue<Load, Store>::operator/=(ValueHandle e) {
OperationHandle TemplatedIndexedValue<Load, Store>::operator/=(ValueHandle e) {
using op::operator/;
return Store(*this / e, getBase(), {indices.begin(), indices.end()});
}
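
For context, a rough sketch of what these compound assignments are for: indexing into a memref-backed value and storing a computed result, with the store now captured as an OperationHandle. The IndexedValue alias and its indexing call operator are assumed from the surrounding EDSC helpers and do not appear in this hunk.

// Sketch only: assumes an active ScopedContext, an IndexedValue alias for
// TemplatedIndexedValue<intrinsics::load, intrinsics::store>, and an
// indexing call operator; all three are assumptions here.
static void scaleInPlace(mlir::Value *memref, mlir::edsc::IndexHandle i,
                         mlir::edsc::ValueHandle two) {
  mlir::edsc::IndexedValue A(memref);          // assumed alias
  // Roughly: load A[i], multiply by `two`, store the product back to A[i].
  mlir::edsc::OperationHandle store = (A(i) *= two);
  (void)store;
}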

View File

@ -76,11 +76,11 @@ struct IndexHandle : public ValueHandle {
};
/// Provides a set of first class intrinsics.
/// In the future, most of intrinsics reated to Instruction that don't contain
/// other instructions should be Tablegen'd.
/// In the future, most of intrinsics related to Operation that don't contain
/// other operations should be Tablegen'd.
namespace intrinsics {
namespace detail {
/// Helper structure to be used with ValueBuilder / InstructionBuilder.
/// Helper structure to be used with ValueBuilder / OperationBuilder.
/// It serves the purpose of removing boilerplate specialization for the sole
/// purpose of implicitly converting ArrayRef<ValueHandle> -> ArrayRef<Value*>.
class ValueHandleArray {
@ -139,51 +139,50 @@ template <typename Op> struct ValueBuilder : public ValueHandle {
ValueBuilder() : ValueHandle(ValueHandle::create<Op>()) {}
};
template <typename Op> struct InstructionBuilder : public InstructionHandle {
template <typename Op> struct OperationBuilder : public OperationHandle {
template <typename... Args>
InstructionBuilder(Args... args)
: InstructionHandle(
InstructionHandle::create<Op>(detail::unpack(args)...)) {}
InstructionBuilder(ArrayRef<ValueHandle> vs)
: InstructionHandle(InstructionHandle::create<Op>(detail::unpack(vs))) {}
OperationBuilder(Args... args)
: OperationHandle(OperationHandle::create<Op>(detail::unpack(args)...)) {}
OperationBuilder(ArrayRef<ValueHandle> vs)
: OperationHandle(OperationHandle::create<Op>(detail::unpack(vs))) {}
template <typename... Args>
InstructionBuilder(ArrayRef<ValueHandle> vs, Args... args)
: InstructionHandle(InstructionHandle::create<Op>(
detail::unpack(vs), detail::unpack(args)...)) {}
OperationBuilder(ArrayRef<ValueHandle> vs, Args... args)
: OperationHandle(OperationHandle::create<Op>(detail::unpack(vs),
detail::unpack(args)...)) {}
template <typename T, typename... Args>
InstructionBuilder(T t, ArrayRef<ValueHandle> vs, Args... args)
: InstructionHandle(InstructionHandle::create<Op>(
OperationBuilder(T t, ArrayRef<ValueHandle> vs, Args... args)
: OperationHandle(OperationHandle::create<Op>(
detail::unpack(t), detail::unpack(vs), detail::unpack(args)...)) {}
template <typename T1, typename T2, typename... Args>
InstructionBuilder(T1 t1, T2 t2, ArrayRef<ValueHandle> vs, Args... args)
: InstructionHandle(InstructionHandle::create<Op>(
OperationBuilder(T1 t1, T2 t2, ArrayRef<ValueHandle> vs, Args... args)
: OperationHandle(OperationHandle::create<Op>(
detail::unpack(t1), detail::unpack(t2), detail::unpack(vs),
detail::unpack(args)...)) {}
InstructionBuilder() : InstructionHandle(InstructionHandle::create<Op>()) {}
OperationBuilder() : OperationHandle(OperationHandle::create<Op>()) {}
};
using alloc = ValueBuilder<AllocOp>;
using constant_float = ValueBuilder<ConstantFloatOp>;
using constant_index = ValueBuilder<ConstantIndexOp>;
using constant_int = ValueBuilder<ConstantIntOp>;
using dealloc = InstructionBuilder<DeallocOp>;
using dealloc = OperationBuilder<DeallocOp>;
using load = ValueBuilder<LoadOp>;
using ret = InstructionBuilder<ReturnOp>;
using ret = OperationBuilder<ReturnOp>;
using select = ValueBuilder<SelectOp>;
using store = InstructionBuilder<StoreOp>;
using store = OperationBuilder<StoreOp>;
using vector_type_cast = ValueBuilder<VectorTypeCastOp>;
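
A small hedged sketch of these aliases in use; it assumes an enclosing ScopedContext, that the ValueBuilder constructors mirror the OperationBuilder overloads above, and that the underlying ops accept these operand forms. Value-producing aliases yield ValueHandle; zero-result ones now yield OperationHandle.

using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;

// Sketch only: a ScopedContext is assumed active; `memrefTy` is a static
// rank-1 MemRefType.
static void roundTrip(mlir::MemRefType memrefTy) {
  ValueHandle buf = alloc(memrefTy);           // ValueBuilder<AllocOp>
  ValueHandle zero = constant_index(0);        // ValueBuilder<ConstantIndexOp>
  ValueHandle v = load(buf, {zero});           // ValueBuilder<LoadOp>
  OperationHandle s = store(v, buf, {zero});   // OperationBuilder<StoreOp>
  OperationHandle d = dealloc(buf);            // OperationBuilder<DeallocOp>
  OperationHandle r = ret();                   // OperationBuilder<ReturnOp>
  (void)s; (void)d; (void)r;
}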
/// Branches into the mlir::Block* captured by BlockHandle `b` with `operands`.
///
/// Prerequisites:
/// All Handles have already captured previously constructed IR objects.
InstructionHandle br(BlockHandle bh, ArrayRef<ValueHandle> operands);
OperationHandle br(BlockHandle bh, ArrayRef<ValueHandle> operands);
/// Creates a new mlir::Block* and branches to it from the current block.
/// Argument types are specified by `operands`.
/// Captures the new block in `bh` and the actual `operands` in `captures`. To
/// insert the new mlir::Block*, a local ScopedContext is constructed and
/// released to the current block. The branch instruction is then added to the
/// released to the current block. The branch operation is then added to the
/// new block.
///
/// Prerequisites:
@ -192,8 +191,8 @@ InstructionHandle br(BlockHandle bh, ArrayRef<ValueHandle> operands);
/// All `operands` have already captured an mlir::Value*
/// captures.size() == operands.size()
/// captures and operands are pairwise of the same type.
InstructionHandle br(BlockHandle *bh, ArrayRef<ValueHandle *> captures,
ArrayRef<ValueHandle> operands);
OperationHandle br(BlockHandle *bh, ArrayRef<ValueHandle *> captures,
ArrayRef<ValueHandle> operands);
/// Branches into the mlir::Block* captured by BlockHandle `trueBranch` with
/// `trueOperands` if `cond` evaluates to `true` (resp. `falseBranch` and
@ -201,17 +200,17 @@ InstructionHandle br(BlockHandle *bh, ArrayRef<ValueHandle *> captures,
///
/// Prerequisites:
/// All Handles have captured previouly constructed IR objects.
InstructionHandle cond_br(ValueHandle cond, BlockHandle trueBranch,
ArrayRef<ValueHandle> trueOperands,
BlockHandle falseBranch,
ArrayRef<ValueHandle> falseOperands);
OperationHandle cond_br(ValueHandle cond, BlockHandle trueBranch,
ArrayRef<ValueHandle> trueOperands,
BlockHandle falseBranch,
ArrayRef<ValueHandle> falseOperands);
/// Eagerly creates new mlir::Block* with argument types specified by
/// `trueOperands`/`falseOperands`.
/// Captures the new blocks in `trueBranch`/`falseBranch` and the arguments in
/// `trueCaptures/falseCaptures`.
/// To insert the new mlir::Block*, a local ScopedContext is constructed and
/// released. The branch instruction is then added in the original location and
/// released. The branch operation is then added in the original location and
/// targeting the eagerly constructed blocks.
///
/// Prerequisites:
@ -222,12 +221,12 @@ InstructionHandle cond_br(ValueHandle cond, BlockHandle trueBranch,
/// `falseCaptures`.size() == `falseOperands`.size()
/// `trueCaptures` and `trueOperands` are pairwise of the same type
/// `falseCaptures` and `falseOperands` are pairwise of the same type.
InstructionHandle cond_br(ValueHandle cond, BlockHandle *trueBranch,
ArrayRef<ValueHandle *> trueCaptures,
ArrayRef<ValueHandle> trueOperands,
BlockHandle *falseBranch,
ArrayRef<ValueHandle *> falseCaptures,
ArrayRef<ValueHandle> falseOperands);
OperationHandle cond_br(ValueHandle cond, BlockHandle *trueBranch,
ArrayRef<ValueHandle *> trueCaptures,
ArrayRef<ValueHandle> trueOperands,
BlockHandle *falseBranch,
ArrayRef<ValueHandle *> falseCaptures,
ArrayRef<ValueHandle> falseOperands);
} // namespace intrinsics
} // namespace edsc
} // namespace mlir
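
Following the prerequisites listed above, a hedged sketch of the eager form of br: it assumes an active ScopedContext and that a ValueHandle can be constructed in the delayed state from a Type to serve as a block-argument capture.

using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;

// Sketch only: `a` and `b` are already-captured ValueHandles.
static void branchToFreshBlock(ValueHandle a, ValueHandle b) {
  BlockHandle target;                                  // not yet captured
  ValueHandle argA(a.getType()), argB(b.getType());    // delayed captures (ctor assumed)
  // Creates the block with matching argument types, then emits the branch;
  // the result is now an OperationHandle rather than an InstructionHandle.
  OperationHandle branch = br(&target, {&argA, &argB}, {a, b});
  (void)branch;
}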

View File

@ -122,8 +122,8 @@ public:
/// Specialization of walk to only visit operations of 'OpTy'.
template <typename OpTy> void walk(std::function<void(OpTy)> callback) {
walk([&](Operation *inst) {
if (auto op = inst->dyn_cast<OpTy>())
walk([&](Operation *opInst) {
if (auto op = opInst->dyn_cast<OpTy>())
callback(op);
});
}
@ -135,8 +135,8 @@ public:
/// Specialization of walkPostOrder to only visit operations of 'OpTy'.
template <typename OpTy>
void walkPostOrder(std::function<void(OpTy)> callback) {
walkPostOrder([&](Operation *inst) {
if (auto op = inst->dyn_cast<OpTy>())
walkPostOrder([&](Operation *opInst) {
if (auto op = opInst->dyn_cast<OpTy>())
callback(op);
});
}
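
For reference, a short sketch of both walkers after the rename; the Function type and AffineForOp come from elsewhere in this diff.

// Sketch: count every operation in a function, then visit only the
// affine.for operations via the filtered overload above.
static void countAndVisit(mlir::Function &f) {
  unsigned numOps = 0;
  f.walk([&](mlir::Operation *) { ++numOps; });
  f.walk<mlir::AffineForOp>([](mlir::AffineForOp forOp) {
    // e.g. inspect forOp's bounds or induction variable here.
    (void)forOp;
  });
  (void)numOps;
}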

View File

@ -507,10 +507,6 @@ inline raw_ostream &operator<<(raw_ostream &os, Operation &op) {
return os;
}
/// Temporary typedef to Instruction to while the codebase transitions to
/// Operation.
using Instruction = Operation;
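
With the temporary alias deleted here, any user code still spelling Instruction has to switch to Operation; a minimal hedged sketch of the post-rename spelling (header paths assumed for this era):

#include "mlir/IR/Operation.h"           // path assumed
#include "llvm/Support/raw_ostream.h"

// Before this commit, spelling the parameter below as `Instruction *` also
// compiled via the alias; after it, Operation is the only spelling.
static void printIfNamed(mlir::Operation *op, llvm::StringRef name) {
  if (op->getName().getStringRef() == name)
    op->print(llvm::errs());
}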
/// This class implements the const/non-const operand iterators for the
/// Operation class in terms of getOperand(idx).
class OperandIterator final

View File

@ -38,7 +38,6 @@ namespace mlir {
class Block;
class Dialect;
class Operation;
using Instruction = Operation;
struct OperationState;
class OpAsmParser;
class OpAsmParserResult;

View File

@ -129,7 +129,7 @@ class LLVM_TerminatorOp<string mnemonic, list<OpTrait> traits = []> :
>];
}
// Class for arithmetic binary instructions.
// Class for arithmetic binary operations.
class LLVM_ArithmeticOp<string mnemonic, string builderFunc,
list<OpTrait> traits = []> :
LLVM_OneResultOp<mnemonic,
@ -137,7 +137,7 @@ class LLVM_ArithmeticOp<string mnemonic, string builderFunc,
Arguments<(ins LLVM_Type:$lhs, LLVM_Type:$rhs)>,
LLVM_Builder<"$res = builder." # builderFunc # "($lhs, $rhs);">;
// Integer binary instructions.
// Integer binary operations.
def LLVM_AddOp : LLVM_ArithmeticOp<"add", "CreateAdd", [Commutative]>;
def LLVM_SubOp : LLVM_ArithmeticOp<"sub", "CreateSub">;
def LLVM_MulOp : LLVM_ArithmeticOp<"mul", "CreateMul", [Commutative]>;
@ -146,7 +146,7 @@ def LLVM_SDivOp : LLVM_ArithmeticOp<"sdiv", "CreateSDiv">;
def LLVM_URemOp : LLVM_ArithmeticOp<"urem", "CreateURem">;
def LLVM_SRemOp : LLVM_ArithmeticOp<"srem", "CreateSRem">;
// Other integer instructions.
// Other integer operations.
def LLVM_ICmpOp : LLVM_OneResultOp<"icmp", [NoSideEffect]>,
Arguments<(ins I32Attr:$predicate, LLVM_Type:$lhs,
LLVM_Type:$rhs)> {
@ -156,14 +156,14 @@ def LLVM_ICmpOp : LLVM_OneResultOp<"icmp", [NoSideEffect]>,
}];
}
// Floating point binary instructions.
// Floating point binary operations.
def LLVM_FAddOp : LLVM_ArithmeticOp<"fadd", "CreateFAdd">;
def LLVM_FSubOp : LLVM_ArithmeticOp<"fsub", "CreateFSub">;
def LLVM_FMulOp : LLVM_ArithmeticOp<"fmul", "CreateFMul">;
def LLVM_FDivOp : LLVM_ArithmeticOp<"fdiv", "CreateFDiv">;
def LLVM_FRemOp : LLVM_ArithmeticOp<"frem", "CreateFRem">;
// Memory-related instructions.
// Memory-related operations.
def LLVM_AllocaOp : LLVM_OneResultOp<"alloca">,
Arguments<(ins LLVM_Type:$arraySize)> {
string llvmBuilder = [{
@ -184,11 +184,11 @@ def LLVM_BitcastOp
LLVM_Builder<"$res = builder.CreateBitCast($arg, $_resultType);">;
// Call-related instructions.
// Call-related operations.
def LLVM_CallOp : LLVM_Op<"call">,
Arguments<(ins OptionalAttr<FunctionAttr>:$callee,
Variadic<LLVM_Type>)>,
Results<(outs Variadic<LLVM_Type>)>,
Results<(outs Variadic<LLVM_Type>)>,
LLVM_TwoBuilders<LLVM_OneResultOpBuilder,
LLVM_ZeroResultOpBuilder> {
let verifier = [{
@ -213,7 +213,7 @@ def LLVM_InsertValueOp : LLVM_OneResultOp<"insertvalue", [NoSideEffect]>,
}];
}
// Misc instructions.
// Misc operations.
def LLVM_SelectOp
: LLVM_OneResultOp<"select", [NoSideEffect]>,
Arguments<(ins LLVM_Type:$condition, LLVM_Type:$trueValue,

View File

@ -16,7 +16,7 @@
// =============================================================================
//
// This file defines convenience types for working with standard operations
// in the MLIR instruction set.
// in the MLIR operation set.
//
//===----------------------------------------------------------------------===//
@ -35,7 +35,7 @@ class Builder;
namespace detail {
/// A custom binary operation printer that omits the "std." prefix from the
/// operation names.
void printStandardBinaryOp(Instruction *op, OpAsmPrinter *p);
void printStandardBinaryOp(Operation *op, OpAsmPrinter *p);
} // namespace detail
class StandardOpsDialect : public Dialect {
@ -85,7 +85,7 @@ public:
MLIRContext *context);
};
/// The "br" operation represents a branch instruction in a function.
/// The "br" operation represents a branch operation in a function.
/// The operation takes variable number of operands and produces no results.
/// The operand number and types for each successor must match the
/// arguments of the block successor. For example:
@ -216,7 +216,7 @@ enum class CmpIPredicate {
/// Since integers are signless, the predicate also explicitly indicates
/// whether to interpret the operands as signed or unsigned integers for
/// less/greater than comparisons. For the sake of readability by humans,
/// custom assembly form for the instruction uses a string-typed attribute for
/// custom assembly form for the operation uses a string-typed attribute for
/// the predicate. The value of this attribute corresponds to lower-cased name
/// of the predicate constant, e.g., "slt" means "signed less than". The string
/// representation of the attribute is merely a syntactic sugar and is converted
@ -250,7 +250,7 @@ public:
Attribute constantFold(ArrayRef<Attribute> operands, MLIRContext *context);
};
/// The "cond_br" operation represents a conditional branch instruction in a
/// The "cond_br" operation represents a conditional branch operation in a
/// function. The operation takes variable number of operands and produces
/// no results. The operand number and types for each successor must match the
// arguments of the block successor. For example:
@ -401,7 +401,7 @@ public:
APFloat getValue() { return getAttrOfType<FloatAttr>("value").getValue(); }
static bool isClassFor(Instruction *op);
static bool isClassFor(Operation *op);
};
/// This is a refinement of the "constant" op for the case where it is
@ -423,7 +423,7 @@ public:
int64_t getValue() { return getAttrOfType<IntegerAttr>("value").getInt(); }
static bool isClassFor(Instruction *op);
static bool isClassFor(Operation *op);
};
/// This is a refinement of the "constant" op for the case where it is
@ -440,7 +440,7 @@ public:
int64_t getValue() { return getAttrOfType<IntegerAttr>("value").getInt(); }
static bool isClassFor(Instruction *op);
static bool isClassFor(Operation *op);
};
/// The "dealloc" operation frees the region of memory referenced by a memref
@ -557,7 +557,7 @@ public:
return getSrcMemRef()->getType().cast<MemRefType>().getRank();
}
// Returns the source memerf indices for this DMA operation.
llvm::iterator_range<Instruction::operand_iterator> getSrcIndices() {
llvm::iterator_range<Operation::operand_iterator> getSrcIndices() {
return {getOperation()->operand_begin() + 1,
getOperation()->operand_begin() + 1 + getSrcMemRefRank()};
}
@ -576,7 +576,7 @@ public:
}
// Returns the destination memref indices for this DMA operation.
llvm::iterator_range<Instruction::operand_iterator> getDstIndices() {
llvm::iterator_range<Operation::operand_iterator> getDstIndices() {
return {getOperation()->operand_begin() + 1 + getSrcMemRefRank() + 1,
getOperation()->operand_begin() + 1 + getSrcMemRefRank() + 1 +
getDstMemRefRank()};
@ -597,7 +597,7 @@ public:
}
// Returns the tag memref index for this DMA operation.
llvm::iterator_range<Instruction::operand_iterator> getTagIndices() {
llvm::iterator_range<Operation::operand_iterator> getTagIndices() {
unsigned tagIndexStartPos =
1 + getSrcMemRefRank() + 1 + getDstMemRefRank() + 1 + 1;
return {getOperation()->operand_begin() + tagIndexStartPos,
@ -677,7 +677,7 @@ public:
Value *getTagMemRef() { return getOperand(0); }
// Returns the tag memref index for this DMA operation.
llvm::iterator_range<Instruction::operand_iterator> getTagIndices() {
llvm::iterator_range<Operation::operand_iterator> getTagIndices() {
return {getOperation()->operand_begin() + 1,
getOperation()->operand_begin() + 1 + getTagMemRefRank()};
}
@ -718,7 +718,7 @@ public:
Value *getAggregate() { return getOperand(0); }
llvm::iterator_range<Instruction::operand_iterator> getIndices() {
llvm::iterator_range<Operation::operand_iterator> getIndices() {
return {getOperation()->operand_begin() + 1, getOperation()->operand_end()};
}
@ -754,7 +754,7 @@ public:
return getMemRef()->getType().cast<MemRefType>();
}
llvm::iterator_range<Instruction::operand_iterator> getIndices() {
llvm::iterator_range<Operation::operand_iterator> getIndices() {
return {getOperation()->operand_begin() + 1, getOperation()->operand_end()};
}
@ -795,7 +795,7 @@ public:
bool verify();
};
/// The "return" operation represents a return instruction within a function.
/// The "return" operation represents a return operation within a function.
/// The operation takes variable number of operands and produces no results.
/// The operand number and types must match the signature of the function
/// that contains the operation. For example:
@ -853,7 +853,7 @@ public:
/// The "store" op writes an element to a memref specified by an index list.
/// The arity of indices is the rank of the memref (i.e. if the memref being
/// stored to is of rank 3, then 3 indices are required for the store following
/// the memref identifier). The store instruction does not produce a result.
/// the memref identifier). The store operation does not produce a result.
///
/// In the following example, the ssa value '%v' is stored in memref '%A' at
/// indices [%i, %j]:
@ -878,7 +878,7 @@ public:
return getMemRef()->getType().cast<MemRefType>();
}
llvm::iterator_range<Instruction::operand_iterator> getIndices() {
llvm::iterator_range<Operation::operand_iterator> getIndices() {
return {getOperation()->operand_begin() + 2, getOperation()->operand_end()};
}
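
A hedged sketch of consuming these Operation::operand_iterator ranges, assuming a LoadOp in hand and the standard-ops header path of this era; StoreOp's getIndices() is used the same way, just starting after the stored value and memref operands.

#include "mlir/StandardOps/Ops.h"        // header path assumed for this era
#include "llvm/Support/raw_ostream.h"

// Sketch: report the memref rank and visit each index operand of a load.
static void inspectLoad(mlir::LoadOp loadOp) {
  auto rank = loadOp.getMemRefType().getRank();
  llvm::errs() << "load from rank-" << rank << " memref\n";
  for (mlir::Value *index : loadOp.getIndices())
    if (auto *def = index->getDefiningOp())
      def->print(llvm::errs());
}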
@ -917,8 +917,8 @@ public:
};
/// Prints dimension and symbol list.
void printDimAndSymbolList(Instruction::operand_iterator begin,
Instruction::operand_iterator end, unsigned numDims,
void printDimAndSymbolList(Operation::operand_iterator begin,
Operation::operand_iterator end, unsigned numDims,
OpAsmPrinter *p);
/// Parses dimension and symbol list and returns true if parsing failed.

View File

@ -113,7 +113,7 @@ public:
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
llvm::iterator_range<Instruction::operand_iterator> getIndices();
llvm::iterator_range<Operation::operand_iterator> getIndices();
Optional<Value *> getPaddingValue();
AffineMap getPermutationMap();
@ -175,7 +175,7 @@ public:
MemRefType getMemRefType() {
return getMemRef()->getType().cast<MemRefType>();
}
llvm::iterator_range<Instruction::operand_iterator> getIndices();
llvm::iterator_range<Operation::operand_iterator> getIndices();
AffineMap getPermutationMap();
static bool parse(OpAsmParser *parser, OperationState *result);

View File

@ -57,7 +57,7 @@ bool mlir::isValidDim(Value *value) {
return false;
if (auto *op = value->getDefiningOp()) {
// Top level instruction or constant operation is ok.
// Top level operation or constant operation is ok.
if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
return true;
// Affine apply operation is ok if all of its operands are ok.
@ -82,7 +82,7 @@ bool mlir::isValidSymbol(Value *value) {
return false;
if (auto *op = value->getDefiningOp()) {
// Top level instruction or constant operation is ok.
// Top level operation or constant operation is ok.
if (op->getParentOp() == nullptr || op->isa<ConstantOp>())
return true;
// Affine apply operation is ok if all of its operands are ok.
@ -106,7 +106,7 @@ bool mlir::isValidSymbol(Value *value) {
/// errors.
template <typename OpTy>
static bool verifyDimAndSymbolIdentifiers(OpTy &op,
Instruction::operand_range operands,
Operation::operand_range operands,
unsigned numDims) {
unsigned opIt = 0;
for (auto *operand : operands) {
@ -462,7 +462,7 @@ AffineApplyNormalizer::AffineApplyNormalizer(AffineMap map,
? t->getDefiningOp()->dyn_cast<AffineApplyOp>()
: AffineApplyOp();
if (affineApply) {
// a. Compose affine.apply instructions.
// a. Compose affine.apply operations.
LLVM_DEBUG(affineApply.getOperation()->print(
dbgs() << "\nCompose AffineApplyOp recursively: "));
AffineMap affineApplyMap = affineApply.getAffineMap();
@ -663,7 +663,7 @@ struct SimplifyAffineApply : public RewritePattern {
SimplifyAffineApply(MLIRContext *context)
: RewritePattern(AffineApplyOp::getOperationName(), 1, context) {}
PatternMatchResult matchAndRewrite(Instruction *op,
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto apply = op->cast<AffineApplyOp>();
auto map = apply.getAffineMap();
@ -1008,7 +1008,7 @@ struct AffineForLoopBoundFolder : public RewritePattern {
AffineForLoopBoundFolder(MLIRContext *context)
: RewritePattern(AffineForOp::getOperationName(), 1, context) {}
PatternMatchResult matchAndRewrite(Instruction *op,
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto forOp = op->cast<AffineForOp>();
auto foldLowerOrUpperBound = [&forOp](bool lower) {

View File

@ -184,7 +184,7 @@ static bool isSameShapedVectorOrTensor(Type type1, Type type2) {
return false;
}
bool OpTrait::impl::verifyCompatibleOperandBroadcast(Instruction *op) {
bool OpTrait::impl::verifyCompatibleOperandBroadcast(Operation *op) {
assert(op->getNumOperands() == 2 &&
"only support broadcast check on two operands");
assert(op->getNumResults() == 1 &&

View File

@ -84,32 +84,32 @@ ValueHandle
mlir::edsc::ValueHandle::createComposedAffineApply(AffineMap map,
ArrayRef<Value *> operands) {
assert(ScopedContext::getBuilder() && "Unexpected null builder");
Instruction *inst =
Operation *op =
makeComposedAffineApply(ScopedContext::getBuilder(),
ScopedContext::getLocation(), map, operands)
.getOperation();
assert(inst->getNumResults() == 1 && "Not a single result AffineApply");
return ValueHandle(inst->getResult(0));
assert(op->getNumResults() == 1 && "Not a single result AffineApply");
return ValueHandle(op->getResult(0));
}
ValueHandle ValueHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
ArrayRef<Type> resultTypes,
ArrayRef<NamedAttribute> attributes) {
Instruction *inst =
InstructionHandle::create(name, operands, resultTypes, attributes);
if (inst->getNumResults() == 1) {
return ValueHandle(inst->getResult(0));
Operation *op =
OperationHandle::create(name, operands, resultTypes, attributes);
if (op->getNumResults() == 1) {
return ValueHandle(op->getResult(0));
}
if (auto f = inst->dyn_cast<AffineForOp>()) {
if (auto f = op->dyn_cast<AffineForOp>()) {
return ValueHandle(f.getInductionVar());
}
llvm_unreachable("unsupported instruction, use an InstructionHandle instead");
llvm_unreachable("unsupported operation, use an OperationHandle instead");
}
InstructionHandle
InstructionHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
ArrayRef<Type> resultTypes,
ArrayRef<NamedAttribute> attributes) {
OperationHandle OperationHandle::create(StringRef name,
ArrayRef<ValueHandle> operands,
ArrayRef<Type> resultTypes,
ArrayRef<NamedAttribute> attributes) {
OperationState state(ScopedContext::getContext(),
ScopedContext::getLocation(), name);
SmallVector<Value *, 4> ops(operands.begin(), operands.end());
@ -118,7 +118,7 @@ InstructionHandle::create(StringRef name, ArrayRef<ValueHandle> operands,
for (const auto &attr : attributes) {
state.addAttribute(attr.first, attr.second);
}
return InstructionHandle(ScopedContext::getBuilder()->createOperation(state));
return OperationHandle(ScopedContext::getBuilder()->createOperation(state));
}
BlockHandle mlir::edsc::BlockHandle::create(ArrayRef<Type> argTypes) {
@ -264,8 +264,8 @@ categorizeValueByAffineType(MLIRContext *context, Value *val, unsigned &numDims,
unsigned &numSymbols) {
AffineExpr d;
Value *resultVal = nullptr;
auto *inst = val->getDefiningOp();
auto constant = inst ? inst->dyn_cast<ConstantIndexOp>() : ConstantIndexOp();
auto *op = val->getDefiningOp();
auto constant = op ? op->dyn_cast<ConstantIndexOp>() : ConstantIndexOp();
if (constant) {
d = getAffineConstantExpr(constant.getValue(), context);
} else if (isValidSymbol(val) && !isValidDim(val)) {

View File

@ -23,15 +23,15 @@
using namespace mlir;
using namespace mlir::edsc;
InstructionHandle mlir::edsc::intrinsics::br(BlockHandle bh,
ArrayRef<ValueHandle> operands) {
OperationHandle mlir::edsc::intrinsics::br(BlockHandle bh,
ArrayRef<ValueHandle> operands) {
assert(bh && "Expected already captured BlockHandle");
for (auto &o : operands) {
(void)o;
assert(o && "Expected already captured ValueHandle");
}
SmallVector<Value *, 4> ops(operands.begin(), operands.end());
return InstructionHandle::create<BranchOp>(bh.getBlock(), ops);
return OperationHandle::create<BranchOp>(bh.getBlock(), ops);
}
static void enforceEmptyCapturesMatchOperands(ArrayRef<ValueHandle *> captures,
ArrayRef<ValueHandle> operands) {
@ -47,28 +47,28 @@ static void enforceEmptyCapturesMatchOperands(ArrayRef<ValueHandle *> captures,
}
}
InstructionHandle mlir::edsc::intrinsics::br(BlockHandle *bh,
ArrayRef<ValueHandle *> captures,
ArrayRef<ValueHandle> operands) {
OperationHandle mlir::edsc::intrinsics::br(BlockHandle *bh,
ArrayRef<ValueHandle *> captures,
ArrayRef<ValueHandle> operands) {
assert(!*bh && "Unexpected already captured BlockHandle");
enforceEmptyCapturesMatchOperands(captures, operands);
BlockBuilder(bh, captures)({/* no body */});
SmallVector<Value *, 4> ops(operands.begin(), operands.end());
return InstructionHandle::create<BranchOp>(bh->getBlock(), ops);
return OperationHandle::create<BranchOp>(bh->getBlock(), ops);
}
InstructionHandle
OperationHandle
mlir::edsc::intrinsics::cond_br(ValueHandle cond, BlockHandle trueBranch,
ArrayRef<ValueHandle> trueOperands,
BlockHandle falseBranch,
ArrayRef<ValueHandle> falseOperands) {
SmallVector<Value *, 4> trueOps(trueOperands.begin(), trueOperands.end());
SmallVector<Value *, 4> falseOps(falseOperands.begin(), falseOperands.end());
return InstructionHandle::create<CondBranchOp>(
return OperationHandle::create<CondBranchOp>(
cond, trueBranch.getBlock(), trueOps, falseBranch.getBlock(), falseOps);
}
InstructionHandle mlir::edsc::intrinsics::cond_br(
OperationHandle mlir::edsc::intrinsics::cond_br(
ValueHandle cond, BlockHandle *trueBranch,
ArrayRef<ValueHandle *> trueCaptures, ArrayRef<ValueHandle> trueOperands,
BlockHandle *falseBranch, ArrayRef<ValueHandle *> falseCaptures,
@ -81,6 +81,6 @@ InstructionHandle mlir::edsc::intrinsics::cond_br(
BlockBuilder(falseBranch, falseCaptures)({/* no body */});
SmallVector<Value *, 4> trueOps(trueOperands.begin(), trueOperands.end());
SmallVector<Value *, 4> falseOps(falseOperands.begin(), falseOperands.end());
return InstructionHandle::create<CondBranchOp>(
return OperationHandle::create<CondBranchOp>(
cond, trueBranch->getBlock(), trueOps, falseBranch->getBlock(), falseOps);
}

View File

@ -40,7 +40,7 @@ struct LowerEDSCTestPass : public FunctionPass<LowerEDSCTestPass> {
#include "mlir/EDSC/reference-impl.inc"
void LowerEDSCTestPass::runOnFunction() {
getFunction().walk([](Instruction *op) {
getFunction().walk([](Operation *op) {
if (op->getName().getStringRef() == "print") {
auto opName = op->getAttrOfType<StringAttr>("op");
if (!opName) {

View File

@ -46,9 +46,9 @@ using namespace mlir::edsc;
using namespace mlir::edsc::detail;
static void printDefininingStatement(llvm::raw_ostream &os, Value &v) {
auto *inst = v.getDefiningOp();
if (inst) {
inst->print(os);
auto *op = v.getDefiningOp();
if (op) {
op->print(os);
return;
}
if (auto forInst = getForInductionVarOwner(&v)) {
@ -86,7 +86,7 @@ static void checkAffineProvenance(ArrayRef<Value *> values) {
for (Value *v : values) {
auto *def = v->getDefiningOp();
(void)def;
// There may be no defining instruction if the value is a function
// There may be no defining operation if the value is a function
// argument. We accept such values.
assert((!def || def->isa<ConstantIndexOp>() || def->isa<AffineApplyOp>() ||
def->isa<AffineForOp>() || def->isa<DimOp>()) &&
@ -127,9 +127,9 @@ Value *mlir::edsc::MLIREmitter::emitExpr(Expr e) {
bool expectedEmpty = false;
if (e.isa<UnaryExpr>() || e.isa<BinaryExpr>() || e.isa<TernaryExpr>() ||
e.isa<VariadicExpr>()) {
// Emit any successors before the instruction with successors. At this
// Emit any successors before the operation with successors. At this
// point, all values defined by the current block must have been bound, the
// current instruction with successors cannot define new values, so the
// current operation with successors cannot define new values, so the
// successor can use those values.
assert(e.getSuccessors().empty() || e.getResultTypes().empty() &&
"an operation with successors must "

View File

@ -230,8 +230,8 @@ Expr::build(FuncBuilder &b, const llvm::DenseMap<Expr, Value *> &ssaBindings,
buildExprs(successorArgs[i], b, ssaBindings, blockBindings));
}
Instruction *inst = b.createOperation(state);
return llvm::to_vector<4>(inst->getResults());
Operation *op = b.createOperation(state);
return llvm::to_vector<4>(op->getResults());
}
static AffineExpr createOperandAffineExpr(Expr e, int64_t position,

View File

@ -138,7 +138,7 @@ private:
void recordTypeReference(Type ty) { usedTypes.insert(ty); }
// Visit functions.
void visitOperation(Operation *inst);
void visitOperation(Operation *op);
void visitType(Type type);
void visitAttribute(Attribute attr);
@ -158,7 +158,7 @@ private:
};
} // end anonymous namespace
// TODO Support visiting other types/instructions when implemented.
// TODO Support visiting other types/operations when implemented.
void ModuleState::visitType(Type type) {
recordTypeReference(type);
if (auto funcType = type.dyn_cast<FunctionType>()) {
@ -189,15 +189,15 @@ void ModuleState::visitAttribute(Attribute attr) {
}
}
void ModuleState::visitOperation(Operation *inst) {
void ModuleState::visitOperation(Operation *op) {
// Visit all the types used in the operation.
for (auto *operand : inst->getOperands())
for (auto *operand : op->getOperands())
visitType(operand->getType());
for (auto *result : inst->getResults())
for (auto *result : op->getResults())
visitType(result->getType());
// Visit each of the attributes.
for (auto elt : inst->getAttrs())
for (auto elt : op->getAttrs())
visitAttribute(elt.second);
}
@ -1058,8 +1058,8 @@ public:
// Print the function signature.
void printFunctionSignature();
// Methods to print instructions.
void print(Operation *inst);
// Methods to print operations.
void print(Operation *op);
void print(Block *block, bool printBlockArgs = true,
bool printBlockTerminator = true);
@ -1124,7 +1124,7 @@ public:
os.indent(currentIndent) << "}";
}
// Number of spaces used for indenting nested instructions.
// Number of spaces used for indenting nested operations.
const static unsigned indentWidth = 2;
protected:
@ -1174,19 +1174,19 @@ FunctionPrinter::FunctionPrinter(Function *function, ModulePrinter &other)
/// continuously throughout regions. In particular, we traverse the regions
/// held by operations and number values in depth-first pre-order.
void FunctionPrinter::numberValuesInBlock(Block &block) {
// Each block gets a unique ID, and all of the instructions within it get
// Each block gets a unique ID, and all of the operations within it get
// numbered as well.
blockIDs[&block] = nextBlockID++;
for (auto *arg : block.getArguments())
numberValueID(arg);
for (auto &inst : block) {
// We number instruction that have results, and we only number the first
for (auto &op : block) {
// We number operation that have results, and we only number the first
// result.
if (inst.getNumResults() != 0)
numberValueID(inst.getResult(0));
for (auto &region : inst.getRegions())
if (op.getNumResults() != 0)
numberValueID(op.getResult(0));
for (auto &region : op.getRegions())
for (auto &block : region)
numberValuesInBlock(block);
}
@ -1387,26 +1387,26 @@ void FunctionPrinter::print(Block *block, bool printBlockArgs,
auto range = llvm::make_range(
block->getOperations().begin(),
std::prev(block->getOperations().end(), printBlockTerminator ? 0 : 1));
for (auto &inst : range) {
print(&inst);
for (auto &op : range) {
print(&op);
os << '\n';
}
currentIndent -= indentWidth;
}
void FunctionPrinter::print(Operation *inst) {
void FunctionPrinter::print(Operation *op) {
os.indent(currentIndent);
printOperation(inst);
printTrailingLocation(inst->getLoc());
printOperation(op);
printTrailingLocation(op->getLoc());
}
void FunctionPrinter::printValueID(Value *value, bool printResultNo) const {
int resultNo = -1;
auto lookupValue = value;
// If this is a reference to the result of a multi-result instruction or
// instruction, print out the # identifier and make sure to map our lookup
// to the first result of the instruction.
// If this is a reference to the result of a multi-result operation or
// operation, print out the # identifier and make sure to map our lookup
// to the first result of the operation.
if (auto *result = dyn_cast<OpResult>(value)) {
if (result->getOwner()->getNumResults() != 1) {
resultNo = result->getResultNumber();

View File

@ -101,13 +101,13 @@ Operation *Operation::create(Location location, OperationName name,
/// Create a new Operation from operation state.
Operation *Operation::create(const OperationState &state) {
unsigned numRegions = state.regions.size();
Operation *inst = create(
state.location, state.name, state.operands, state.types, state.attributes,
state.successors, numRegions, state.resizableOperandList, state.context);
Operation *op = create(state.location, state.name, state.operands,
state.types, state.attributes, state.successors,
numRegions, state.resizableOperandList, state.context);
for (unsigned i = 0; i < numRegions; ++i)
if (state.regions[i])
inst->getRegion(i).takeBody(*state.regions[i]);
return inst;
op->getRegion(i).takeBody(*state.regions[i]);
return op;
}
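
A hedged sketch of driving this overload directly: populate an OperationState and hand it to Operation::create. The constructor and addAttribute appear elsewhere in this diff; addOperands and addTypes are assumed companions, and the op name is a placeholder.

// Sketch only: builds a free-standing, unregistered "test.make" operation.
static mlir::Operation *makeOp(mlir::MLIRContext *ctx, mlir::Location loc,
                               llvm::ArrayRef<mlir::Value *> operands,
                               mlir::Type resultType) {
  mlir::OperationState state(ctx, loc, "test.make");
  state.addOperands(operands);          // assumed helper
  state.addTypes({resultType});         // assumed helper
  return mlir::Operation::create(state);
}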
/// Overload of create that takes an existing NamedAttributeList to avoid

View File

@ -344,7 +344,7 @@ public:
dialect(dialect) {}
// Match by type.
PatternMatchResult match(Instruction *op) const override {
PatternMatchResult match(Operation *op) const override {
if (op->isa<SourceOp>())
return this->matchSuccess();
return this->matchFailure();
@ -428,7 +428,7 @@ struct OneToOneLLVMOpLowering : public LLVMLegalizationPattern<SourceOp> {
// Convert the type of the result to an LLVM type, pass operands as is,
// preserve attributes.
SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
unsigned numResults = op->getNumResults();
auto *mlirContext = op->getContext();
@ -541,7 +541,7 @@ static bool isSupportedMemRefType(MemRefType type) {
struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
using LLVMLegalizationPattern<AllocOp>::LLVMLegalizationPattern;
PatternMatchResult match(Instruction *op) const override {
PatternMatchResult match(Operation *op) const override {
if (!LLVMLegalizationPattern<AllocOp>::match(op))
return matchFailure();
auto allocOp = op->cast<AllocOp>();
@ -549,7 +549,7 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
return isSupportedMemRefType(type) ? matchSuccess() : matchFailure();
}
SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
auto allocOp = op->cast<AllocOp>();
MemRefType type = allocOp.getType();
@ -650,7 +650,7 @@ struct AllocOpLowering : public LLVMLegalizationPattern<AllocOp> {
struct DeallocOpLowering : public LLVMLegalizationPattern<DeallocOp> {
using LLVMLegalizationPattern<DeallocOp>::LLVMLegalizationPattern;
SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
assert(operands.size() == 1 && "dealloc takes one operand");
@ -684,7 +684,7 @@ struct DeallocOpLowering : public LLVMLegalizationPattern<DeallocOp> {
struct MemRefCastOpLowering : public LLVMLegalizationPattern<MemRefCastOp> {
using LLVMLegalizationPattern<MemRefCastOp>::LLVMLegalizationPattern;
PatternMatchResult match(Instruction *op) const override {
PatternMatchResult match(Operation *op) const override {
if (!LLVMLegalizationPattern<MemRefCastOp>::match(op))
return matchFailure();
auto memRefCastOp = op->cast<MemRefCastOp>();
@ -697,7 +697,7 @@ struct MemRefCastOpLowering : public LLVMLegalizationPattern<MemRefCastOp> {
: matchFailure();
}
SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
auto memRefCastOp = op->cast<MemRefCastOp>();
auto targetType = memRefCastOp.getType();
@ -764,7 +764,7 @@ struct MemRefCastOpLowering : public LLVMLegalizationPattern<MemRefCastOp> {
struct DimOpLowering : public LLVMLegalizationPattern<DimOp> {
using LLVMLegalizationPattern<DimOp>::LLVMLegalizationPattern;
PatternMatchResult match(Instruction *op) const override {
PatternMatchResult match(Operation *op) const override {
if (!LLVMLegalizationPattern<DimOp>::match(op))
return this->matchFailure();
auto dimOp = op->cast<DimOp>();
@ -772,7 +772,7 @@ struct DimOpLowering : public LLVMLegalizationPattern<DimOp> {
return isSupportedMemRefType(type) ? matchSuccess() : matchFailure();
}
SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
assert(operands.size() == 1 && "expected exactly one operand");
auto dimOp = op->cast<DimOp>();
@ -811,7 +811,7 @@ struct LoadStoreOpLowering : public LLVMLegalizationPattern<Derived> {
using LLVMLegalizationPattern<Derived>::LLVMLegalizationPattern;
using Base = LoadStoreOpLowering<Derived>;
PatternMatchResult match(Instruction *op) const override {
PatternMatchResult match(Operation *op) const override {
if (!LLVMLegalizationPattern<Derived>::match(op))
return this->matchFailure();
auto loadOp = op->cast<Derived>();
@ -923,7 +923,7 @@ struct LoadStoreOpLowering : public LLVMLegalizationPattern<Derived> {
struct LoadOpLowering : public LoadStoreOpLowering<LoadOp> {
using Base::Base;
SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
auto loadOp = op->cast<LoadOp>();
auto type = loadOp.getMemRefType();
@ -945,7 +945,7 @@ struct LoadOpLowering : public LoadStoreOpLowering<LoadOp> {
struct StoreOpLowering : public LoadStoreOpLowering<StoreOp> {
using Base::Base;
SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
auto storeOp = op->cast<StoreOp>();
auto type = storeOp.getMemRefType();
@ -965,7 +965,7 @@ struct OneToOneLLVMTerminatorLowering
using LLVMLegalizationPattern<SourceOp>::LLVMLegalizationPattern;
using Super = OneToOneLLVMTerminatorLowering<SourceOp, TargetOp>;
void rewriteTerminator(Instruction *op, ArrayRef<Value *> properOperands,
void rewriteTerminator(Operation *op, ArrayRef<Value *> properOperands,
ArrayRef<Block *> destinations,
ArrayRef<ArrayRef<Value *>> operands,
FuncBuilder &rewriter) const override {
@ -983,7 +983,7 @@ struct OneToOneLLVMTerminatorLowering
struct ReturnOpLowering : public LLVMLegalizationPattern<ReturnOp> {
using LLVMLegalizationPattern<ReturnOp>::LLVMLegalizationPattern;
SmallVector<Value *, 4> rewrite(Instruction *op, ArrayRef<Value *> operands,
SmallVector<Value *, 4> rewrite(Operation *op, ArrayRef<Value *> operands,
FuncBuilder &rewriter) const override {
unsigned numArguments = op->getNumOperands();

View File

@ -2255,10 +2255,10 @@ public:
// Operations
ParseResult parseOperation();
Instruction *parseGenericOperation();
Instruction *parseCustomOperation();
Operation *parseGenericOperation();
Operation *parseCustomOperation();
ParseResult parseInstructions(Block *block);
ParseResult parseOperations(Block *block);
private:
Function *function;
@ -2397,7 +2397,7 @@ ParseResult FunctionParser::parseRegionBody(Region &region) {
/// Block declaration.
///
/// block ::= block-label? instruction* terminator-inst
/// block ::= block-label? operation* terminator-op
/// block-label ::= block-id block-arg-list? `:`
/// block-id ::= caret-id
/// block-arg-list ::= `(` ssa-id-and-type-list? `)`
@ -2456,19 +2456,19 @@ ParseResult FunctionParser::parseBlockBody(Block *block) {
/// Create and remember a new placeholder for a forward reference.
Value *FunctionParser::createForwardReferencePlaceholder(SMLoc loc, Type type) {
// Forward references are always created as instructions, even in ML
// Forward references are always created as operations, even in ML
// functions, because we just need something with a def/use chain.
//
// We create these placeholders as having an empty name, which we know
// cannot be created through normal user input, allowing us to distinguish
// them.
auto name = OperationName("placeholder", getContext());
auto *inst = Instruction::create(
auto *op = Operation::create(
getEncodedSourceLocation(loc), name, /*operands=*/{}, type,
/*attributes=*/llvm::None, /*successors=*/{}, /*numRegions=*/0,
/*resizableOperandList=*/false, getContext());
forwardReferencePlaceholders[inst->getResult(0)] = loc;
return inst->getResult(0);
forwardReferencePlaceholders[op->getResult(0)] = loc;
return op->getResult(0);
}
/// Given an unbound reference to an SSA value and its type, return the value
@ -2532,7 +2532,7 @@ ParseResult FunctionParser::finalizeFunction(SMLoc loc) {
FunctionParser::~FunctionParser() {
for (auto &fwd : forwardReferencePlaceholders) {
// Drop all uses of undefined forward declared reference and destroy
// defining instruction.
// defining operation.
fwd.first->dropAllUses();
fwd.first->getDefiningOp()->destroy();
}
@ -2569,7 +2569,7 @@ ParseResult FunctionParser::addDefinition(SSAUseInfo useInfo, Value *value) {
return ParseSuccess;
}
/// Parse a SSA operand for an instruction or instruction.
/// Parse a SSA operand for an operation.
///
/// ssa-use ::= ssa-id
///
@ -2803,7 +2803,7 @@ ParseResult FunctionParser::parseOperation() {
return ParseFailure;
}
Instruction *op;
Operation *op;
if (getToken().is(Token::bare_identifier) || getToken().isKeyword())
op = parseCustomOperation();
else if (getToken().is(Token::string))
@ -2815,7 +2815,7 @@ ParseResult FunctionParser::parseOperation() {
if (!op)
return ParseFailure;
// If the instruction had a name, register it.
// If the operation had a name, register it.
if (!resultID.empty()) {
if (op->getNumResults() == 0)
return emitError(loc, "cannot name an operation with no results");
@ -2850,7 +2850,7 @@ struct CleanupOpStateRegions {
};
} // namespace
Instruction *FunctionParser::parseGenericOperation() {
Operation *FunctionParser::parseGenericOperation() {
// Get location information for the operation.
auto srcLocation = getEncodedSourceLocation(getToken().getLoc());
@ -2894,7 +2894,7 @@ Instruction *FunctionParser::parseGenericOperation() {
return nullptr;
}
if (parseToken(Token::colon, "expected ':' followed by instruction type"))
if (parseToken(Token::colon, "expected ':' followed by operation type"))
return nullptr;
auto typeLoc = getToken().getLoc();
@ -3238,7 +3238,7 @@ private:
};
} // end anonymous namespace.
Instruction *FunctionParser::parseCustomOperation() {
Operation *FunctionParser::parseCustomOperation() {
auto opLoc = getToken().getLoc();
auto opName = getTokenSpelling();
CustomOpAsmParser opAsmParser(opLoc, opName, *this);

View File

@ -36,7 +36,7 @@ using namespace mlir;
/// A custom binary operation printer that omits the "std." prefix from the
/// operation names.
void detail::printStandardBinaryOp(Instruction *op, OpAsmPrinter *p) {
void detail::printStandardBinaryOp(Operation *op, OpAsmPrinter *p) {
assert(op->getNumOperands() == 2 && "binary op should have two operands");
assert(op->getNumResults() == 1 && "binary op should have one result");
@ -68,8 +68,8 @@ StandardOpsDialect::StandardOpsDialect(MLIRContext *context)
>();
}
void mlir::printDimAndSymbolList(Instruction::operand_iterator begin,
Instruction::operand_iterator end,
void mlir::printDimAndSymbolList(Operation::operand_iterator begin,
Operation::operand_iterator end,
unsigned numDims, OpAsmPrinter *p) {
*p << '(';
p->printOperands(begin, begin + numDims);
@ -123,7 +123,7 @@ struct MemRefCastFolder : public RewritePattern {
MemRefCastFolder(StringRef rootOpName, MLIRContext *context)
: RewritePattern(rootOpName, 1, context) {}
PatternMatchResult match(Instruction *op) const override {
PatternMatchResult match(Operation *op) const override {
for (auto *operand : op->getOperands())
if (matchPattern(operand, m_Op<MemRefCastOp>()))
return matchSuccess();
@ -131,7 +131,7 @@ struct MemRefCastFolder : public RewritePattern {
return matchFailure();
}
void rewrite(Instruction *op, PatternRewriter &rewriter) const override {
void rewrite(Operation *op, PatternRewriter &rewriter) const override {
for (unsigned i = 0, e = op->getNumOperands(); i != e; ++i)
if (auto *memref = op->getOperand(i)->getDefiningOp())
if (auto cast = memref->dyn_cast<MemRefCastOp>())
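
The hunks above illustrate the shape every rewrite pattern in this file takes after the rename: `match` and `rewrite` now receive an `Operation *` where they previously received an `Instruction *`. A minimal sketch of that shape, assuming it sits next to the patterns above (so the usual headers and `using namespace mlir` are already in scope) and using a hypothetical op class `MyOp`:

// Sketch only: the post-rename rewrite pattern shape. MyOp is hypothetical;
// the rest mirrors the API used by the patterns in this file.
struct ExamplePattern : public RewritePattern {
  ExamplePattern(MLIRContext *context)
      : RewritePattern(MyOp::getOperationName(), /*benefit=*/1, context) {}

  // Both hooks take Operation * after this change.
  PatternMatchResult match(Operation *op) const override {
    auto myOp = op->cast<MyOp>(); // the root kind is guaranteed by the ctor
    (void)myOp;                   // inspect the op and return a verdict here
    return matchFailure();
  }
  void rewrite(Operation *op, PatternRewriter &rewriter) const override {
    // Rewrite through `rewriter`, as the patterns in this file do.
  }
};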
@ -283,12 +283,12 @@ bool AllocOp::verify() {
}
namespace {
/// Fold constant dimensions into an alloc instruction.
/// Fold constant dimensions into an alloc operation.
struct SimplifyAllocConst : public RewritePattern {
SimplifyAllocConst(MLIRContext *context)
: RewritePattern(AllocOp::getOperationName(), 1, context) {}
PatternMatchResult match(Instruction *op) const override {
PatternMatchResult match(Operation *op) const override {
auto alloc = op->cast<AllocOp>();
// Check to see if any dimensions operands are constants. If so, we can
@ -299,7 +299,7 @@ struct SimplifyAllocConst : public RewritePattern {
return matchFailure();
}
void rewrite(Instruction *op, PatternRewriter &rewriter) const override {
void rewrite(Operation *op, PatternRewriter &rewriter) const override {
auto allocOp = op->cast<AllocOp>();
auto memrefType = allocOp.getType();
@ -350,13 +350,13 @@ struct SimplifyAllocConst : public RewritePattern {
}
};
/// Fold alloc instructions with no uses. Alloc has side effects on the heap,
/// Fold alloc operations with no uses. Alloc has side effects on the heap,
/// but can still be deleted if it has zero uses.
struct SimplifyDeadAlloc : public RewritePattern {
SimplifyDeadAlloc(MLIRContext *context)
: RewritePattern(AllocOp::getOperationName(), 1, context) {}
PatternMatchResult matchAndRewrite(Instruction *op,
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
// Check if the alloc'ed value has any uses.
auto alloc = op->cast<AllocOp>();
@ -487,7 +487,7 @@ struct SimplifyIndirectCallWithKnownCallee : public RewritePattern {
SimplifyIndirectCallWithKnownCallee(MLIRContext *context)
: RewritePattern(CallIndirectOp::getOperationName(), 1, context) {}
PatternMatchResult matchAndRewrite(Instruction *op,
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto indirectCall = op->cast<CallIndirectOp>();
@ -798,7 +798,7 @@ struct SimplifyConstCondBranchPred : public RewritePattern {
SimplifyConstCondBranchPred(MLIRContext *context)
: RewritePattern(CondBranchOp::getOperationName(), 1, context) {}
PatternMatchResult matchAndRewrite(Instruction *op,
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto condbr = op->cast<CondBranchOp>();
@ -1040,13 +1040,13 @@ void ConstantFloatOp::build(Builder *builder, OperationState *result,
ConstantOp::build(builder, result, type, builder->getFloatAttr(type, value));
}
bool ConstantFloatOp::isClassFor(Instruction *op) {
bool ConstantFloatOp::isClassFor(Operation *op) {
return ConstantOp::isClassFor(op) &&
op->getResult(0)->getType().isa<FloatType>();
}
/// ConstantIntOp only matches values whose result type is an IntegerType.
bool ConstantIntOp::isClassFor(Instruction *op) {
bool ConstantIntOp::isClassFor(Operation *op) {
return ConstantOp::isClassFor(op) &&
op->getResult(0)->getType().isa<IntegerType>();
}
@ -1068,7 +1068,7 @@ void ConstantIntOp::build(Builder *builder, OperationState *result,
}
/// ConstantIndexOp only matches values whose result type is Index.
bool ConstantIndexOp::isClassFor(Instruction *op) {
bool ConstantIndexOp::isClassFor(Operation *op) {
return ConstantOp::isClassFor(op) && op->getResult(0)->getType().isIndex();
}
@ -1083,19 +1083,19 @@ void ConstantIndexOp::build(Builder *builder, OperationState *result,
// DeallocOp
//===----------------------------------------------------------------------===//
namespace {
/// Fold Dealloc instructions that are deallocating an AllocOp that is only used
/// Fold Dealloc operations that are deallocating an AllocOp that is only used
/// by other Dealloc operations.
struct SimplifyDeadDealloc : public RewritePattern {
SimplifyDeadDealloc(MLIRContext *context)
: RewritePattern(DeallocOp::getOperationName(), 1, context) {}
PatternMatchResult matchAndRewrite(Instruction *op,
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto dealloc = op->cast<DeallocOp>();
// Check that the memref operand's defining instruction is an AllocOp.
// Check that the memref operand's defining operation is an AllocOp.
Value *memref = dealloc.getMemRef();
Instruction *defOp = memref->getDefiningOp();
Operation *defOp = memref->getDefiningOp();
if (!defOp || !defOp->isa<AllocOp>())
return matchFailure();
@ -1987,7 +1987,7 @@ struct SimplifyXMinusX : public RewritePattern {
SimplifyXMinusX(MLIRContext *context)
: RewritePattern(SubIOp::getOperationName(), 1, context) {}
PatternMatchResult matchAndRewrite(Instruction *op,
PatternMatchResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
auto subi = op->cast<SubIOp>();
if (subi.getOperand(0) != subi.getOperand(1))

View File

@ -85,7 +85,7 @@ void VectorTransferReadOp::build(Builder *builder, OperationState *result,
result->addTypes(vectorType);
}
llvm::iterator_range<Instruction::operand_iterator>
llvm::iterator_range<Operation::operand_iterator>
VectorTransferReadOp::getIndices() {
auto begin = getOperation()->operand_begin() + Offsets::FirstIndexOffset;
auto end = begin + getMemRefType().getRank();
@ -286,7 +286,7 @@ void VectorTransferWriteOp::build(Builder *builder, OperationState *result,
builder->getAffineMapAttr(permutationMap));
}
llvm::iterator_range<Instruction::operand_iterator>
llvm::iterator_range<Operation::operand_iterator>
VectorTransferWriteOp::getIndices() {
auto begin = getOperation()->operand_begin() + Offsets::FirstIndexOffset;
auto end = begin + getMemRefType().getRank();

View File

@ -43,8 +43,8 @@ namespace {
// Implementation class for module translation. Holds a reference to the module
// being translated, and the mappings between the original and the translated
// functions, basic blocks and values. It is practically easier to hold these
// mappings in one class since the conversion of control flow instructions
// needs to look up block and function mappins.
// mappings in one class since the conversion of control flow operations
// needs to look up block and function mappings.
class ModuleTranslation {
public:
// Translate the given MLIR module expressed in MLIR LLVM IR dialect into an
@ -59,7 +59,7 @@ private:
bool convertOneFunction(Function &func);
void connectPHINodes(Function &func);
bool convertBlock(Block &bb, bool ignoreArguments);
bool convertInstruction(Instruction &inst, llvm::IRBuilder<> &builder);
bool convertOperation(Operation &op, llvm::IRBuilder<> &builder);
template <typename Range>
SmallVector<llvm::Value *, 8> lookupValues(Range &&values);
@ -191,12 +191,12 @@ SmallVector<llvm::Value *, 8> ModuleTranslation::lookupValues(Range &&values) {
return remapped;
}
// Given a single MLIR instruction, create the corresponding LLVM IR instruction
// Given a single MLIR operation, create the corresponding LLVM IR instruction
// using the `builder`. LLVM IR Builder does not have a generic interface so
// this has to be a long chain of `if`s calling different functions with a
// different number of arguments.
bool ModuleTranslation::convertInstruction(Instruction &inst,
llvm::IRBuilder<> &builder) {
bool ModuleTranslation::convertOperation(Operation &opInst,
llvm::IRBuilder<> &builder) {
auto extractPosition = [](ArrayAttr attr) {
SmallVector<unsigned, 4> position;
position.reserve(attr.size());
@ -212,10 +212,10 @@ bool ModuleTranslation::convertInstruction(Instruction &inst,
// itself. Otherwise, this is an indirect call and the callee is the first
// operand, look it up as a normal value. Return the llvm::Value representing
// the function result, which may be of llvm::VoidTy type.
auto convertCall = [this, &builder](Instruction &inst) -> llvm::Value * {
auto operands = lookupValues(inst.getOperands());
auto convertCall = [this, &builder](Operation &op) -> llvm::Value * {
auto operands = lookupValues(op.getOperands());
ArrayRef<llvm::Value *> operandsRef(operands);
if (auto attr = inst.getAttrOfType<FunctionAttr>("callee")) {
if (auto attr = op.getAttrOfType<FunctionAttr>("callee")) {
return builder.CreateCall(functionMapping.lookup(attr.getValue()),
operandsRef);
} else {
@ -225,10 +225,10 @@ bool ModuleTranslation::convertInstruction(Instruction &inst,
// Emit calls. If the called function has a result, remap the corresponding
// value. Note that LLVM IR dialect CallOp has either 0 or 1 result.
if (auto op = inst.dyn_cast<LLVM::CallOp>()) {
llvm::Value *result = convertCall(inst);
if (inst.getNumResults() != 0) {
valueMapping[inst.getResult(0)] = result;
if (opInst.isa<LLVM::CallOp>()) {
llvm::Value *result = convertCall(opInst);
if (opInst.getNumResults() != 0) {
valueMapping[opInst.getResult(0)] = result;
return false;
}
// Check that LLVM call returns void for 0-result functions.
@ -237,19 +237,19 @@ bool ModuleTranslation::convertInstruction(Instruction &inst,
// Emit branches. We need to look up the remapped blocks and ignore the block
// arguments that were transformed into PHI nodes.
if (auto op = inst.dyn_cast<LLVM::BrOp>()) {
builder.CreateBr(blockMapping[op.getSuccessor(0)]);
if (auto brOp = opInst.dyn_cast<LLVM::BrOp>()) {
builder.CreateBr(blockMapping[brOp.getSuccessor(0)]);
return false;
}
if (auto op = inst.dyn_cast<LLVM::CondBrOp>()) {
builder.CreateCondBr(valueMapping.lookup(op.getOperand(0)),
blockMapping[op.getSuccessor(0)],
blockMapping[op.getSuccessor(1)]);
if (auto condbrOp = opInst.dyn_cast<LLVM::CondBrOp>()) {
builder.CreateCondBr(valueMapping.lookup(condbrOp.getOperand(0)),
blockMapping[condbrOp.getSuccessor(0)],
blockMapping[condbrOp.getSuccessor(1)]);
return false;
}
inst.emitError("unsupported or non-LLVM operation: " +
inst.getName().getStringRef());
opInst.emitError("unsupported or non-LLVM operation: " +
opInst.getName().getStringRef());
return true;
}
@ -259,7 +259,7 @@ bool ModuleTranslation::convertInstruction(Instruction &inst,
bool ModuleTranslation::convertBlock(Block &bb, bool ignoreArguments) {
llvm::IRBuilder<> builder(blockMapping[&bb]);
// Before traversing instructions, make block arguments available through
// Before traversing operations, make block arguments available through
// value remapping and PHI nodes, but do not add incoming edges for the PHI
// nodes just yet: those values may be defined by this or following blocks.
// This step is omitted if "ignoreArguments" is set. The arguments of the
@ -282,16 +282,16 @@ bool ModuleTranslation::convertBlock(Block &bb, bool ignoreArguments) {
}
}
// Traverse instructions.
for (auto &inst : bb) {
if (convertInstruction(inst, builder))
// Traverse operations.
for (auto &op : bb) {
if (convertOperation(op, builder))
return true;
}
return false;
}
// Get the SSA value passed to the current block from the terminator instruction
// Get the SSA value passed to the current block from the terminator operation
// of its predecessor.
static Value *getPHISourceValue(Block *current, Block *pred,
unsigned numArguments, unsigned index) {
@ -304,7 +304,7 @@ static Value *getPHISourceValue(Block *current, Block *pred,
// through the "true" or the "false" branch and take the relevant operands.
auto condBranchOp = terminator.dyn_cast<LLVM::CondBrOp>();
assert(condBranchOp &&
"only branch instructions can be terminators of a block that "
"only branch operations can be terminators of a block that "
"has successors");
assert((condBranchOp.getSuccessor(0) != condBranchOp.getSuccessor(1)) &&
"successors with arguments in LLVM conditional branches must be "

View File

@ -79,11 +79,11 @@ TEST_FUNC(blocks) {
// assigning a block won't work well with branches, update the body instead.
b1.set({r = arg1 + arg2, edsc::Branch(b2, {arg1, r})});
b2.set({edsc::Branch(b1, {arg3, arg4})});
auto instr = edsc::Branch(b2, {c1, c2});
auto op = edsc::Branch(b2, {c1, c2});
// Emit a branch to b2. This should also emit blocks b2 and b1 that appear as
// successors to the current block after the branch instruction is insterted.
edsc::MLIREmitter(&builder, f->getLoc()).emitStmt(instr);
// successors to the current block after the branch operation is inserted.
edsc::MLIREmitter(&builder, f->getLoc()).emitStmt(op);
// clang-format off
// CHECK-LABEL: @blocks
@ -101,7 +101,7 @@ TEST_FUNC(blocks) {
}
// Inject two EDSC-constructed blocks with arguments and a conditional branch
// instruction that transfers control to these blocks.
// operation that transfers control to these blocks.
TEST_FUNC(cond_branch) {
auto f =
makeFunction("cond_branch", {}, {IntegerType::get(1, &globalContext())});

View File

@ -374,19 +374,19 @@ TEST_FUNC(custom_ops) {
auto f = makeFunction("custom_ops", {}, {indexType, indexType});
ScopedContext scope(f.get());
CustomInstruction<ValueHandle> MY_CUSTOM_OP("my_custom_op");
CustomInstruction<InstructionHandle> MY_CUSTOM_INST_0("my_custom_inst_0");
CustomInstruction<InstructionHandle> MY_CUSTOM_INST_2("my_custom_inst_2");
CustomOperation<ValueHandle> MY_CUSTOM_OP("my_custom_op");
CustomOperation<OperationHandle> MY_CUSTOM_OP_0("my_custom_op_0");
CustomOperation<OperationHandle> MY_CUSTOM_OP_2("my_custom_op_2");
// clang-format off
ValueHandle vh(indexType), vh20(indexType), vh21(indexType);
InstructionHandle ih0, ih2;
OperationHandle ih0, ih2;
IndexHandle m, n, M(f->getArgument(0)), N(f->getArgument(1));
IndexHandle ten(index_t(10)), twenty(index_t(20));
LoopNestBuilder({&m, &n}, {M, N}, {M + ten, N + twenty}, {1, 1})({
vh = MY_CUSTOM_OP({m, m + n}, {indexType}, {}),
ih0 = MY_CUSTOM_INST_0({m, m + n}, {}),
ih2 = MY_CUSTOM_INST_2({m, m + n}, {indexType, indexType}),
ih0 = MY_CUSTOM_OP_0({m, m + n}, {}),
ih2 = MY_CUSTOM_OP_2({m, m + n}, {indexType, indexType}),
// These captures are verbose for now, can improve when used in practice.
vh20 = ValueHandle(ih2.getOperation()->getResult(0)),
vh21 = ValueHandle(ih2.getOperation()->getResult(1)),
@ -397,8 +397,8 @@ TEST_FUNC(custom_ops) {
// CHECK: affine.for %i0 {{.*}}
// CHECK: affine.for %i1 {{.*}}
// CHECK: {{.*}} = "my_custom_op"{{.*}} : (index, index) -> index
// CHECK: "my_custom_inst_0"{{.*}} : (index, index) -> ()
// CHECK: [[TWO:%[a-z0-9]+]] = "my_custom_inst_2"{{.*}} : (index, index) -> (index, index)
// CHECK: "my_custom_op_0"{{.*}} : (index, index) -> ()
// CHECK: [[TWO:%[a-z0-9]+]] = "my_custom_op_2"{{.*}} : (index, index) -> (index, index)
// CHECK: {{.*}} = "my_custom_op"([[TWO]]#0, [[TWO]]#1) : (index, index) -> index
// clang-format on
f->print(llvm::outs());

View File

@ -247,7 +247,7 @@ func @for_negative_stride() {
// -----
func @non_instruction() {
func @non_operation() {
asd // expected-error {{custom op 'asd' is unknown}}
}

View File

@ -2,7 +2,7 @@
// CHECK-LABEL: func @ops(%arg0: !llvm<"i32">, %arg1: !llvm<"float">)
func @ops(%arg0 : !llvm<"i32">, %arg1 : !llvm<"float">) {
// Integer artithmetics binary instructions.
// Integer arithmetic binary operations.
//
// CHECK-NEXT: %0 = "llvm.add"(%arg0, %arg0) : (!llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">
// CHECK-NEXT: %1 = "llvm.sub"(%arg0, %arg0) : (!llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">
@ -21,7 +21,7 @@ func @ops(%arg0 : !llvm<"i32">, %arg1 : !llvm<"float">) {
%6 = "llvm.srem"(%arg0, %arg0) : (!llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">
%7 = "llvm.icmp"(%arg0, %arg0) {predicate: 1} : (!llvm<"i32">, !llvm<"i32">) -> !llvm<"i1">
// Floating point binary instructions.
// Floating point binary operations.
//
// CHECK-NEXT: %8 = "llvm.fadd"(%arg1, %arg1) : (!llvm<"float">, !llvm<"float">) -> !llvm<"float">
// CHECK-NEXT: %9 = "llvm.fsub"(%arg1, %arg1) : (!llvm<"float">, !llvm<"float">) -> !llvm<"float">
@ -34,7 +34,7 @@ func @ops(%arg0 : !llvm<"i32">, %arg1 : !llvm<"float">) {
%11 = "llvm.fdiv"(%arg1, %arg1) : (!llvm<"float">, !llvm<"float">) -> !llvm<"float">
%12 = "llvm.frem"(%arg1, %arg1) : (!llvm<"float">, !llvm<"float">) -> !llvm<"float">
// Memory-related instructions.
// Memory-related operations.
//
// CHECK-NEXT: %13 = "llvm.alloca"(%arg0) : (!llvm<"i32">) -> !llvm<"double*">
// CHECK-NEXT: %14 = "llvm.getelementptr"(%13, %arg0, %arg0) : (!llvm<"double*">, !llvm<"i32">, !llvm<"i32">) -> !llvm<"double*">
@ -47,7 +47,7 @@ func @ops(%arg0 : !llvm<"i32">, %arg1 : !llvm<"float">) {
"llvm.store"(%15, %13) : (!llvm<"double">, !llvm<"double*">) -> ()
%16 = "llvm.bitcast"(%13) : (!llvm<"double*">) -> !llvm<"i64*">
// Function call-related instructions.
// Function call-related operations.
//
// CHECK-NEXT: %17 = "llvm.call"(%arg0) {callee: @foo : (!llvm<"i32">) -> !llvm<"{ i32, double, i32 }">} : (!llvm<"i32">) -> !llvm<"{ i32, double, i32 }">
// CHECK-NEXT: %18 = "llvm.extractvalue"(%17) {position: [0]} : (!llvm<"{ i32, double, i32 }">) -> !llvm<"i32">
@ -57,7 +57,7 @@ func @ops(%arg0 : !llvm<"i32">, %arg1 : !llvm<"float">) {
%18 = "llvm.extractvalue"(%17) {position: [0]} : (!llvm<"{ i32, double, i32 }">) -> !llvm<"i32">
%19 = "llvm.insertvalue"(%17, %18) {position: [2]} : (!llvm<"{ i32, double, i32 }">, !llvm<"i32">) -> !llvm<"{ i32, double, i32 }">
// Terminator instructions and their successors.
// Terminator operations and their successors.
//
// CHECK: "llvm.br"()[^bb1] : () -> ()
"llvm.br"()[^bb1] : () -> ()
@ -72,7 +72,7 @@ func @ops(%arg0 : !llvm<"i32">, %arg1 : !llvm<"float">) {
%20 = "llvm.pseudo.undef"() : () -> !llvm<"{ i32, double, i32 }">
%21 = "llvm.pseudo.constant"() {value: 42} : () -> !llvm<"i47">
// Misc instructions.
// Misc operations.
// CHECK: %22 = "llvm.select"(%7, %0, %1) : (!llvm<"i1">, !llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">
// CHECK-NEXT: "llvm.return"() : () -> ()
%22 = "llvm.select"(%7, %0, %1) : (!llvm<"i1">, !llvm<"i32">, !llvm<"i32">) -> !llvm<"i32">

View File

@ -403,7 +403,7 @@ func @dma_loop_straightline_interspersed() {
return
}
// There are three regions here - the 'load' preceding the loop, the loop
// itself, and the instructions appearing after the loop.
// itself, and the operations appearing after the loop.
// CHECK: %0 = alloc() : memref<256xf32>
// CHECK-NEXT: %1 = alloc() : memref<1xf32, 2>
// CHECK-NEXT: %2 = alloc() : memref<1xi32>

View File

@ -569,7 +569,7 @@ func @args_ret_affine_apply(index, index) -> (index, index) {
//===---------------------------------------------------------------------===//
// Test lowering of Euclidean (floor) division, ceil division and modulo
// operation used in affine expressions. In addition to testing the
// instruction-level output, check that the obtained results are correct by
// operation-level output, check that the obtained results are correct by
// applying constant folding transformation after affine lowering.
//===---------------------------------------------------------------------===//

View File

@ -254,7 +254,7 @@ func @live_out_use(%arg0: memref<512 x 32 x f32>) -> f32 {
memref<32 x 32 x f32, 2>, memref<1 x i32>
dma_wait %tag[%zero], %num_elt : memref<1 x i32>
}
// Use live out of 'affine.for' inst; no DMA pipelining will be done.
// Use live out of 'affine.for' op; no DMA pipelining will be done.
%v = load %Av[%zero, %zero] : memref<32 x 32 x f32, 2>
return %v : f32
// CHECK: %{{[0-9]+}} = load %{{[0-9]+}}[%c0, %c0] : memref<32x32xf32, 2>

View File

@ -23,8 +23,8 @@ def : Pat<(X_AddOp (X_AddOp:$res $lhs, $rhs), $rrhs), (Y_AddOp $lhs, U:$rhs, T_C
// CHECK: struct GeneratedConvert0 : public RewritePattern
// CHECK: RewritePattern("x.add", 1, context)
// CHECK: PatternMatchResult match(Instruction *
// CHECK: void rewrite(Instruction *op, std::unique_ptr<PatternState>
// CHECK: PatternMatchResult match(Operation *
// CHECK: void rewrite(Operation *op, std::unique_ptr<PatternState>
// CHECK: PatternRewriter &rewriter)
// CHECK: rewriter.create<Y::AddOp>(loc, op->getResult(0)->getType()
// CHECK: void populateWithGenerated

View File

@ -33,7 +33,7 @@ def NS_AOp : Op<"a_op", [NoSideEffect]> {
// CHECK: using Op::Op;
// CHECK: static StringRef getOperationName();
// CHECK: Value *a();
// CHECK: Instruction::operand_range b();
// CHECK: Operation::operand_range b();
// CHECK: Value *r();
// CHECK: APInt attr1();
// CHECK: Optional< APFloat > attr2();

View File

@ -24,8 +24,8 @@ def Z_AddOp : Op<"z.add"> {
def : Pat<(Y_AddOp $lhs, $rhs, $attr1), (Y_AddOp $lhs, $rhs, (T_Compose_Attr $attr1, T_Const_Attr:$attr2))>;
// CHECK: struct GeneratedConvert0 : public RewritePattern
// CHECK: RewritePattern("y.add", 1, context)
// CHECK: PatternMatchResult match(Instruction *
// CHECK: void rewrite(Instruction *op, std::unique_ptr<PatternState>
// CHECK: PatternMatchResult match(Operation *
// CHECK: void rewrite(Operation *op, std::unique_ptr<PatternState>
// CHECK-NEXT: PatternRewriter &rewriter)
// CHECK: auto vAddOp0 = rewriter.create<Y::AddOp>(loc, op->getResult(0)->getType(),
// CHECK-NEXT: s.lhs,
@ -37,8 +37,8 @@ def : Pat<(Y_AddOp $lhs, $rhs, $attr1), (Y_AddOp $lhs, $rhs, (T_Compose_Attr $at
def : Pat<(Z_AddOp $lhs, $rhs, $attr1, $attr2), (Y_AddOp $lhs, $rhs, (T_Compose_Attr $attr1, $attr2))>;
// CHECK: struct GeneratedConvert1 : public RewritePattern
// CHECK: RewritePattern("z.add", 1, context)
// CHECK: PatternMatchResult match(Instruction *
// CHECK: void rewrite(Instruction *op, std::unique_ptr<PatternState>
// CHECK: PatternMatchResult match(Operation *
// CHECK: void rewrite(Operation *op, std::unique_ptr<PatternState>
// CHECK-NEXT: PatternRewriter &rewriter)
// CHECK: auto vAddOp0 = rewriter.create<Y::AddOp>(loc, op->getResult(0)->getType(),
// CHECK-NEXT: s.lhs,

View File

@ -141,11 +141,11 @@ static bool emitOneBuilder(const Record &record, raw_ostream &os) {
bs << "op.getResult()->getType().cast<LLVM::LLVMType>()."
"getUnderlyingType()";
} else if (name == "_hasResult") {
bs << "inst.getNumResults() == 1";
bs << "opInst.getNumResults() == 1";
} else if (name == "_location") {
bs << "inst.getLoc()";
bs << "opInst.getLoc()";
} else if (name == "_numOperands") {
bs << "inst.getNumOperands()";
bs << "opInst.getNumOperands()";
} else if (name == "$") {
bs << '$';
} else {
@ -157,7 +157,7 @@ static bool emitOneBuilder(const Record &record, raw_ostream &os) {
}
// Output the check and the rewritten builder string.
os << "if (auto op = inst.dyn_cast<" << op.getQualCppClassName()
os << "if (auto op = opInst.dyn_cast<" << op.getQualCppClassName()
<< ">()) {\n";
os << bs.str() << builderStrRef << "\n";
os << " return false;\n";

View File

@ -487,7 +487,7 @@ void OpEmitter::genNamedOperandGetters() {
assert(getOperation()->getNumOperands() >= {0});
return {std::next(operand_begin(), {0}), operand_end()};
)";
auto &m = opClass.newMethod("Instruction::operand_range", operand.name);
auto &m = opClass.newMethod("Operation::operand_range", operand.name);
m.body() << formatv(code, i);
}
}
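
For a concrete picture of the output: with the NS_AOp definition from the test above, whose second operand `b` is variadic (the index 1 below is an assumption based on declaration order), the emitted getter would expand roughly to:

// Rough expansion of the formatv template above for an operand at index 1;
// the class name AOp comes from the NS_AOp test, the index is assumed.
Operation::operand_range AOp::b() {
  assert(getOperation()->getNumOperands() >= 1);
  return {std::next(operand_begin(), 1), operand_end()};
}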

View File

@ -160,7 +160,7 @@ void PatternEmitter::emitOpMatch(DagNode tree, int depth) {
int indent = 4 + 2 * depth;
// Skip the operand matching at depth 0 as the pattern rewriter already does.
if (depth != 0) {
// Skip if there is no defining instruction (e.g., arguments to function).
// Skip if there is no defining operation (e.g., arguments to function).
os.indent(indent) << formatv("if (!op{0}) return matchFailure();\n", depth);
os.indent(indent) << formatv(
"if (!op{0}->isa<{1}>()) return matchFailure();\n", depth,
@ -271,7 +271,7 @@ void PatternEmitter::emitAttributeMatch(DagNode tree, int index, int depth,
void PatternEmitter::emitMatchMethod(DagNode tree) {
// Emit the heading.
os << R"(
PatternMatchResult match(Instruction *op0) const override {
PatternMatchResult match(Operation *op0) const override {
auto ctx = op0->getContext(); (void)ctx;
auto state = llvm::make_unique<MatchedState>();)"
<< "\n";
@ -286,7 +286,7 @@ void PatternEmitter::emitMatchMethod(DagNode tree) {
}
for (auto &res : pattern.getSourcePatternBoundResults())
os.indent(4) << formatv("mlir::Instruction* {0}; (void){0};\n",
os.indent(4) << formatv("mlir::Operation* {0}; (void){0};\n",
resultName(res.first()));
emitOpMatch(tree, 0);
@ -378,7 +378,7 @@ void PatternEmitter::emitRewriteMethod() {
PrintFatalError(loc, "must provide at least one result pattern");
os << R"(
void rewrite(Instruction *op, std::unique_ptr<PatternState> state,
void rewrite(Operation *op, std::unique_ptr<PatternState> state,
PatternRewriter &rewriter) const override {
auto& s = *static_cast<MatchedState *>(state.get());
auto loc = op->getLoc(); (void)loc;

View File

@ -41,7 +41,7 @@ TEST(OperandStorageTest, NonResizable) {
builder.getIntegerType(16));
Value *operand = useOp->getResult(0);
// Create a non-resizable instruction with one operand.
// Create a non-resizable operation with one operand.
Operation *user = createOp(&context, /*resizableOperands=*/false, operand,
builder.getIntegerType(16));
@ -56,7 +56,7 @@ TEST(OperandStorageTest, NonResizable) {
user->setOperands(llvm::None);
EXPECT_EQ(user->getNumOperands(), 0);
// Destroy the instructions.
// Destroy the operations.
user->destroy();
useOp->destroy();
}
@ -70,14 +70,14 @@ TEST(OperandStorageDeathTest, AddToNonResizable) {
builder.getIntegerType(16));
Value *operand = useOp->getResult(0);
// Create a non-resizable instruction with one operand.
// Create a non-resizable operation with one operand.
Operation *user = createOp(&context, /*resizableOperands=*/false, operand,
builder.getIntegerType(16));
// Sanity check the storage.
EXPECT_EQ(user->hasResizableOperandsList(), false);
// Adding operands to a non resizable instruction should result in a failure.
// Adding operands to a non-resizable operation should result in a failure.
ASSERT_DEATH(user->setOperands({operand, operand}), "");
}
@ -90,7 +90,7 @@ TEST(OperandStorageTest, Resizable) {
builder.getIntegerType(16));
Value *operand = useOp->getResult(0);
// Create a resizable instruction with one operand.
// Create a resizable operation with one operand.
Operation *user = createOp(&context, /*resizableOperands=*/true, operand,
builder.getIntegerType(16));
@ -109,7 +109,7 @@ TEST(OperandStorageTest, Resizable) {
user->setOperands({operand, operand, operand});
EXPECT_EQ(user->getNumOperands(), 3);
// Destroy the instructions.
// Destroy the operations.
user->destroy();
useOp->destroy();
}
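
The resizable/non-resizable behaviour exercised by these tests is fixed when the operation is created. As a sketch of how such an operation can be built directly, mirroring the Operation::create call shown in the parser change earlier (the op name "test.op" is made up, and the test's own createOp helper presumably does something equivalent):

// Sketch only: creating an operation with a resizable operand list.
auto name = OperationName("test.op", &context);
Operation *op = Operation::create(
    builder.getUnknownLoc(), name, /*operands=*/{operand},
    /*resultTypes=*/builder.getIntegerType(16),
    /*attributes=*/llvm::None, /*successors=*/{}, /*numRegions=*/0,
    /*resizableOperandList=*/true, &context);
op->destroy(); // destroy it explicitly, as the tests above do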