[mlir][NFC] Update GPU/SCF operations to use `hasVerifier` instead of `verifier`

The `verifier` field is deprecated and slated for removal.
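A minimal sketch of the migration pattern applied here, using a hypothetical FooOp rather than one of the ops touched by this commit: in ODS the inline `verifier` body is replaced by `hasVerifier = 1`, and the verification logic moves from a free `static LogicalResult verify(FooOp)` function in the dialect's .cpp file into the `FooOp::verify()` member that ODS declares when `hasVerifier` is set.

    // Before: ODS embeds the verifier body, which forwards to a free function
    // defined in the dialect's .cpp file.
    def FooOp : Foo_Op<"foo"> {
      let verifier = [{ return ::verify(*this); }];
    }
    //   static LogicalResult verify(FooOp op) { return op.emitOpError("..."); }

    // After: ODS only records that the op has a custom verifier; the generated
    // op class declares `LogicalResult verify();`, implemented as a member.
    def FooOp : Foo_Op<"foo"> {
      let hasVerifier = 1;
    }
    //   LogicalResult FooOp::verify() { return emitOpError("..."); }

Ops whose checks are already covered by traits simply drop the placeholder `let verifier = [{ return success(); }];` (or `let verifier = ?;`) without setting `hasVerifier`, as several of the GPU and SCF ops below do.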

Differential Revision: https://reviews.llvm.org/D118827
River Riddle 2022-02-02 10:24:22 -08:00
parent 4809da8eaf
commit 094ede6d20
4 changed files with 144 additions and 171 deletions


@@ -110,7 +110,6 @@ def GPU_SubgroupIdOp : GPU_Op<"subgroup_id", [NoSideEffect]>,
}];
let assemblyFormat = "attr-dict `:` type($result)";
let verifier = [{ return success(); }];
}
def GPU_NumSubgroupsOp : GPU_Op<"num_subgroups", [NoSideEffect]>,
@@ -126,7 +125,6 @@ def GPU_NumSubgroupsOp : GPU_Op<"num_subgroups", [NoSideEffect]>,
}];
let assemblyFormat = "attr-dict `:` type($result)";
let verifier = [{ return success(); }];
}
def GPU_SubgroupSizeOp : GPU_Op<"subgroup_size", [NoSideEffect]>,
@@ -142,7 +140,6 @@ def GPU_SubgroupSizeOp : GPU_Op<"subgroup_size", [NoSideEffect]>,
}];
let assemblyFormat = "attr-dict `:` type($result)";
let verifier = [{ return success(); }];
}
def GPU_GPUFuncOp : GPU_Op<"func", [
@@ -298,7 +295,6 @@ def GPU_GPUFuncOp : GPU_Op<"func", [
LogicalResult verifyBody();
}];
// let verifier = [{ return ::verifFuncOpy(*this); }];
let printer = [{ printGPUFuncOp(p, *this); }];
let parser = [{ return parseGPUFuncOp(parser, result); }];
}
@@ -434,7 +430,6 @@ def GPU_LaunchFuncOp : GPU_Op<"launch_func",
static StringRef getKernelAttrName() { return "kernel"; }
}];
let verifier = [{ return ::verify(*this); }];
let assemblyFormat = [{
custom<AsyncDependencies>(type($asyncToken), $asyncDependencies)
$kernel
@@ -443,6 +438,7 @@
(`dynamic_shared_memory_size` $dynamicSharedMemorySize^)?
custom<LaunchFuncOperands>($operands, type($operands)) attr-dict
}];
let hasVerifier = 1;
}
def GPU_LaunchOp : GPU_Op<"launch">,
@@ -562,8 +558,8 @@ def GPU_LaunchOp : GPU_Op<"launch">,
let parser = [{ return parseLaunchOp(parser, result); }];
let printer = [{ printLaunchOp(p, *this); }];
let verifier = [{ return ::verify(*this); }];
let hasCanonicalizer = 1;
let hasVerifier = 1;
}
def GPU_PrintfOp : GPU_Op<"printf", [MemoryEffects<[MemWrite]>]>,
@@ -595,7 +591,7 @@ def GPU_ReturnOp : GPU_Op<"return", [HasParent<"GPUFuncOp">, NoSideEffect,
let builders = [OpBuilder<(ins), [{ // empty}]>];
let assemblyFormat = "attr-dict ($operands^ `:` type($operands))?";
let verifier = [{ return ::verify(*this); }];
let hasVerifier = 1;
}
def GPU_TerminatorOp : GPU_Op<"terminator", [HasParent<"LaunchOp">,
@@ -682,9 +678,9 @@ def GPU_AllReduceOp : GPU_Op<"all_reduce",
in convergence.
}];
let regions = (region AnyRegion:$body);
let verifier = [{ return ::verifyAllReduce(*this); }];
let assemblyFormat = [{ custom<AllReduceOperation>($op) $value $body attr-dict
`:` functional-type(operands, results) }];
let hasVerifier = 1;
}
def GPU_ShuffleOpXor : I32EnumAttrCase<"XOR", 0, "xor">;
@@ -822,7 +818,6 @@ def GPU_HostRegisterOp : GPU_Op<"host_register">,
}];
let assemblyFormat = "$value attr-dict `:` type($value)";
let verifier = [{ return success(); }];
}
def GPU_WaitOp : GPU_Op<"wait", [GPU_AsyncOpInterface]> {
@@ -971,8 +966,8 @@ def GPU_MemcpyOp : GPU_Op<"memcpy", [GPU_AsyncOpInterface]> {
custom<AsyncDependencies>(type($asyncToken), $asyncDependencies)
$dst`,` $src `:` type($dst)`,` type($src) attr-dict
}];
let verifier = [{ return ::verify(*this); }];
let hasFolder = 1;
let hasVerifier = 1;
}
def GPU_MemsetOp : GPU_Op<"memset",
@@ -1006,8 +1001,6 @@ def GPU_MemsetOp : GPU_Op<"memset",
custom<AsyncDependencies>(type($asyncToken), $asyncDependencies)
$dst`,` $value `:` type($dst)`,` type($value) attr-dict
}];
// MemsetOp is fully verified by traits.
let verifier = [{ return success(); }];
let hasFolder = 1;
}
@@ -1048,8 +1041,7 @@ def GPU_SubgroupMmaLoadMatrixOp : GPU_Op<"subgroup_mma_load_matrix",
let assemblyFormat = [{
$srcMemref`[`$indices`]` attr-dict `:` type($srcMemref) `->` type($res)
}];
let verifier = [{ return ::verify(*this); }];
let hasVerifier = 1;
}
def GPU_SubgroupMmaStoreMatrixOp : GPU_Op<"subgroup_mma_store_matrix",
@@ -1086,8 +1078,7 @@ def GPU_SubgroupMmaStoreMatrixOp : GPU_Op<"subgroup_mma_store_matrix",
let assemblyFormat = [{
$src`,` $dstMemref`[`$indices`]` attr-dict `:` type($src)`,` type($dstMemref)
}];
let verifier = [{ return ::verify(*this); }];
let hasVerifier = 1;
}
def GPU_SubgroupMmaComputeOp : GPU_Op<"subgroup_mma_compute",
@@ -1125,8 +1116,7 @@ def GPU_SubgroupMmaComputeOp : GPU_Op<"subgroup_mma_compute",
let assemblyFormat = [{
$opA`,` $opB`,` $opC attr-dict `:` type($opA)`,` type($opB) `->` type($res)
}];
let verifier = [{ return ::verify(*this); }];
let hasVerifier = 1;
}
def GPU_SubgroupMmaConstantMatrixOp : GPU_Op<"subgroup_mma_constant_matrix",


@@ -29,12 +29,10 @@ class SCF_Op<string mnemonic, list<Trait> traits = []> :
Op<SCF_Dialect, mnemonic, traits> {
// For every standard op, there needs to be a:
// * void print(OpAsmPrinter &p, ${C++ class of Op} op)
// * LogicalResult verify(${C++ class of Op} op)
// * ParseResult parse${C++ class of Op}(OpAsmParser &parser,
// OperationState &result)
// functions.
let printer = [{ return ::print(p, *this); }];
let verifier = [{ return ::verify(*this); }];
let parser = [{ return ::parse$cppClass(parser, result); }];
}
@@ -56,9 +54,6 @@ def ConditionOp : SCF_Op<"condition", [
let assemblyFormat =
[{ `(` $condition `)` attr-dict ($args^ `:` type($args))? }];
// Override the default verifier, everything is checked by traits.
let verifier = ?;
}
//===----------------------------------------------------------------------===//
@@ -114,6 +109,7 @@ def ExecuteRegionOp : SCF_Op<"execute_region"> {
let hasCanonicalizer = 1;
let hasFolder = 0;
let hasVerifier = 1;
}
def ForOp : SCF_Op<"for",
@@ -312,6 +308,7 @@ def ForOp : SCF_Op<"for",
}];
let hasCanonicalizer = 1;
let hasVerifier = 1;
}
def IfOp : SCF_Op<"if",
@@ -404,6 +401,7 @@ def IfOp : SCF_Op<"if",
}];
let hasFolder = 1;
let hasCanonicalizer = 1;
let hasVerifier = 1;
}
def ParallelOp : SCF_Op<"parallel",
@@ -485,6 +483,7 @@ def ParallelOp : SCF_Op<"parallel",
}];
let hasCanonicalizer = 1;
let hasVerifier = 1;
}
def ReduceOp : SCF_Op<"reduce", [HasParent<"ParallelOp">]> {
@@ -533,6 +532,7 @@ def ReduceOp : SCF_Op<"reduce", [HasParent<"ParallelOp">]> {
let arguments = (ins AnyType:$operand);
let regions = (region SizedRegion<1>:$reductionOperator);
let hasVerifier = 1;
}
def ReduceReturnOp :
@@ -551,6 +551,7 @@ def ReduceReturnOp :
let arguments = (ins AnyType:$result);
let assemblyFormat = "$result attr-dict `:` type($result)";
let hasVerifier = 1;
}
def WhileOp : SCF_Op<"while",
@@ -683,6 +684,7 @@ def WhileOp : SCF_Op<"while",
}];
let hasCanonicalizer = 1;
let hasVerifier = 1;
}
def YieldOp : SCF_Op<"yield", [NoSideEffect, ReturnLike, Terminator,
@@ -706,10 +708,6 @@ def YieldOp : SCF_Op<"yield", [NoSideEffect, ReturnLike, Terminator,
let assemblyFormat =
[{ attr-dict ($results^ `:` type($results))? }];
// Override default verifier (defined in SCF_Op), no custom verification
// needed.
let verifier = ?;
}
#endif // MLIR_DIALECT_SCF_SCFOPS


@@ -270,46 +270,37 @@ LogicalResult GPUDialect::verifyOperationAttribute(Operation *op,
return walkResult.wasInterrupted() ? failure() : success();
}
template <typename T>
static LogicalResult verifyIndexOp(T op) {
auto dimension = op.dimension();
if (dimension != "x" && dimension != "y" && dimension != "z")
return op.emitError("dimension \"") << dimension << "\" is invalid";
return success();
}
static LogicalResult verifyAllReduce(gpu::AllReduceOp allReduce) {
if (allReduce.body().empty() != allReduce.op().hasValue())
return allReduce.emitError(
"expected either an op attribute or a non-empty body");
if (!allReduce.body().empty()) {
if (allReduce.body().getNumArguments() != 2)
return allReduce.emitError("expected two region arguments");
for (auto argument : allReduce.body().getArguments()) {
if (argument.getType() != allReduce.getType())
return allReduce.emitError("incorrect region argument type");
LogicalResult gpu::AllReduceOp::verify() {
if (body().empty() != op().hasValue())
return emitError("expected either an op attribute or a non-empty body");
if (!body().empty()) {
if (body().getNumArguments() != 2)
return emitError("expected two region arguments");
for (auto argument : body().getArguments()) {
if (argument.getType() != getType())
return emitError("incorrect region argument type");
}
unsigned yieldCount = 0;
for (Block &block : allReduce.body()) {
for (Block &block : body()) {
if (auto yield = dyn_cast<gpu::YieldOp>(block.getTerminator())) {
if (yield.getNumOperands() != 1)
return allReduce.emitError("expected one gpu.yield operand");
if (yield.getOperand(0).getType() != allReduce.getType())
return allReduce.emitError("incorrect gpu.yield type");
return emitError("expected one gpu.yield operand");
if (yield.getOperand(0).getType() != getType())
return emitError("incorrect gpu.yield type");
++yieldCount;
}
}
if (yieldCount == 0)
return allReduce.emitError("expected gpu.yield op in region");
return emitError("expected gpu.yield op in region");
} else {
gpu::AllReduceOperation opName = *allReduce.op();
gpu::AllReduceOperation opName = *op();
if ((opName == gpu::AllReduceOperation::AND ||
opName == gpu::AllReduceOperation::OR ||
opName == gpu::AllReduceOperation::XOR) &&
!allReduce.getType().isa<IntegerType>()) {
return allReduce.emitError()
<< '`' << gpu::stringifyAllReduceOperation(opName) << '`'
<< " accumulator is only compatible with Integer type";
!getType().isa<IntegerType>()) {
return emitError()
<< '`' << gpu::stringifyAllReduceOperation(opName)
<< "` accumulator is only compatible with Integer type";
}
}
return success();
@@ -411,20 +402,20 @@ KernelDim3 LaunchOp::getBlockSizeOperandValues() {
return KernelDim3{getOperand(3), getOperand(4), getOperand(5)};
}
static LogicalResult verify(LaunchOp op) {
LogicalResult LaunchOp::verify() {
// Kernel launch takes kNumConfigOperands leading operands for grid/block
// sizes and transforms them into kNumConfigRegionAttributes region arguments
// for block/thread identifiers and grid/block sizes.
if (!op.body().empty()) {
if (op.body().getNumArguments() !=
LaunchOp::kNumConfigOperands + op.getNumOperands() -
(op.dynamicSharedMemorySize() ? 1 : 0))
return op.emitOpError("unexpected number of region arguments");
if (!body().empty()) {
if (body().getNumArguments() != LaunchOp::kNumConfigOperands +
getNumOperands() -
(dynamicSharedMemorySize() ? 1 : 0))
return emitOpError("unexpected number of region arguments");
}
// Block terminators without successors are expected to exit the kernel region
// and must be `gpu.terminator`.
for (Block &block : op.body()) {
for (Block &block : body()) {
if (block.empty())
continue;
if (block.back().getNumSuccessors() != 0)
@@ -434,7 +425,7 @@ static LogicalResult verify(LaunchOp op) {
.emitError()
.append("expected '", gpu::TerminatorOp::getOperationName(),
"' or a terminator with successors")
.attachNote(op.getLoc())
.attachNote(getLoc())
.append("in '", LaunchOp::getOperationName(), "' body region");
}
}
@@ -650,21 +641,21 @@ KernelDim3 LaunchFuncOp::getBlockSizeOperandValues() {
return KernelDim3{operands[3], operands[4], operands[5]};
}
static LogicalResult verify(LaunchFuncOp op) {
auto module = op->getParentOfType<ModuleOp>();
LogicalResult LaunchFuncOp::verify() {
auto module = (*this)->getParentOfType<ModuleOp>();
if (!module)
return op.emitOpError("expected to belong to a module");
return emitOpError("expected to belong to a module");
if (!module->getAttrOfType<UnitAttr>(
GPUDialect::getContainerModuleAttrName()))
return op.emitOpError(
"expected the closest surrounding module to have the '" +
GPUDialect::getContainerModuleAttrName() + "' attribute");
return emitOpError("expected the closest surrounding module to have the '" +
GPUDialect::getContainerModuleAttrName() +
"' attribute");
auto kernelAttr = op->getAttrOfType<SymbolRefAttr>(op.getKernelAttrName());
auto kernelAttr = (*this)->getAttrOfType<SymbolRefAttr>(getKernelAttrName());
if (!kernelAttr)
return op.emitOpError("symbol reference attribute '" +
op.getKernelAttrName() + "' must be specified");
return emitOpError("symbol reference attribute '" + getKernelAttrName() +
"' must be specified");
return success();
}
@@ -945,25 +936,25 @@ LogicalResult GPUFuncOp::verifyBody() {
// ReturnOp
//===----------------------------------------------------------------------===//
static LogicalResult verify(gpu::ReturnOp returnOp) {
GPUFuncOp function = returnOp->getParentOfType<GPUFuncOp>();
LogicalResult gpu::ReturnOp::verify() {
GPUFuncOp function = (*this)->getParentOfType<GPUFuncOp>();
FunctionType funType = function.getType();
if (funType.getNumResults() != returnOp.operands().size())
return returnOp.emitOpError()
if (funType.getNumResults() != operands().size())
return emitOpError()
.append("expected ", funType.getNumResults(), " result operands")
.attachNote(function.getLoc())
.append("return type declared here");
for (const auto &pair : llvm::enumerate(
llvm::zip(function.getType().getResults(), returnOp.operands()))) {
llvm::zip(function.getType().getResults(), operands()))) {
Type type;
Value operand;
std::tie(type, operand) = pair.value();
if (type != operand.getType())
return returnOp.emitOpError() << "unexpected type `" << operand.getType()
<< "' for operand #" << pair.index();
return emitOpError() << "unexpected type `" << operand.getType()
<< "' for operand #" << pair.index();
}
return success();
}
@@ -1014,15 +1005,15 @@ static void print(OpAsmPrinter &p, GPUModuleOp op) {
// GPUMemcpyOp
//===----------------------------------------------------------------------===//
static LogicalResult verify(MemcpyOp op) {
auto srcType = op.src().getType();
auto dstType = op.dst().getType();
LogicalResult MemcpyOp::verify() {
auto srcType = src().getType();
auto dstType = dst().getType();
if (getElementTypeOrSelf(srcType) != getElementTypeOrSelf(dstType))
return op.emitOpError("arguments have incompatible element type");
return emitOpError("arguments have incompatible element type");
if (failed(verifyCompatibleShape(srcType, dstType)))
return op.emitOpError("arguments have incompatible shape");
return emitOpError("arguments have incompatible shape");
return success();
}
@@ -1056,26 +1047,26 @@ static void printAsyncDependencies(OpAsmPrinter &printer, Operation *op,
// GPU_SubgroupMmaLoadMatrixOp
//===----------------------------------------------------------------------===//
static LogicalResult verify(SubgroupMmaLoadMatrixOp op) {
auto srcType = op.srcMemref().getType();
auto resType = op.res().getType();
LogicalResult SubgroupMmaLoadMatrixOp::verify() {
auto srcType = srcMemref().getType();
auto resType = res().getType();
auto resMatrixType = resType.cast<gpu::MMAMatrixType>();
auto operand = resMatrixType.getOperand();
auto srcMemrefType = srcType.cast<MemRefType>();
auto srcMemSpace = srcMemrefType.getMemorySpaceAsInt();
if (!srcMemrefType.getLayout().isIdentity())
return op.emitError("expected identity layout map for source memref");
return emitError("expected identity layout map for source memref");
if (srcMemSpace != kGenericMemorySpace && srcMemSpace != kSharedMemorySpace &&
srcMemSpace != kGlobalMemorySpace)
return op.emitError(
return emitError(
"source memorySpace kGenericMemorySpace, kSharedMemorySpace or "
"kGlobalMemorySpace only allowed");
if (!operand.equals("AOp") && !operand.equals("BOp") &&
!operand.equals("COp"))
return op.emitError("only AOp, BOp and COp can be loaded");
return emitError("only AOp, BOp and COp can be loaded");
return success();
}
@@ -1084,23 +1075,22 @@ static LogicalResult verify(SubgroupMmaLoadMatrixOp op) {
// GPU_SubgroupMmaStoreMatrixOp
//===----------------------------------------------------------------------===//
static LogicalResult verify(SubgroupMmaStoreMatrixOp op) {
auto srcType = op.src().getType();
auto dstType = op.dstMemref().getType();
LogicalResult SubgroupMmaStoreMatrixOp::verify() {
auto srcType = src().getType();
auto dstType = dstMemref().getType();
auto srcMatrixType = srcType.cast<gpu::MMAMatrixType>();
auto dstMemrefType = dstType.cast<MemRefType>();
auto dstMemSpace = dstMemrefType.getMemorySpaceAsInt();
if (!dstMemrefType.getLayout().isIdentity())
return op.emitError("expected identity layout map for destination memref");
return emitError("expected identity layout map for destination memref");
if (dstMemSpace != kGenericMemorySpace && dstMemSpace != kSharedMemorySpace &&
dstMemSpace != kGlobalMemorySpace)
return op.emitError(
"destination memorySpace of kGenericMemorySpace, "
"kGlobalMemorySpace or kSharedMemorySpace only allowed");
return emitError("destination memorySpace of kGenericMemorySpace, "
"kGlobalMemorySpace or kSharedMemorySpace only allowed");
if (!srcMatrixType.getOperand().equals("COp"))
return op.emitError(
return emitError(
"expected the operand matrix being stored to have 'COp' operand type");
return success();
@@ -1110,21 +1100,17 @@ static LogicalResult verify(SubgroupMmaStoreMatrixOp op) {
// GPU_SubgroupMmaComputeOp
//===----------------------------------------------------------------------===//
static LogicalResult verify(SubgroupMmaComputeOp op) {
LogicalResult SubgroupMmaComputeOp::verify() {
enum OperandMap { A, B, C };
SmallVector<MMAMatrixType, 3> opTypes;
auto populateOpInfo = [&opTypes, &op]() {
opTypes.push_back(op.opA().getType().cast<MMAMatrixType>());
opTypes.push_back(op.opB().getType().cast<MMAMatrixType>());
opTypes.push_back(op.opC().getType().cast<MMAMatrixType>());
};
populateOpInfo();
opTypes.push_back(opA().getType().cast<MMAMatrixType>());
opTypes.push_back(opB().getType().cast<MMAMatrixType>());
opTypes.push_back(opC().getType().cast<MMAMatrixType>());
if (!opTypes[A].getOperand().equals("AOp") ||
!opTypes[B].getOperand().equals("BOp") ||
!opTypes[C].getOperand().equals("COp"))
return op.emitError("operands must be in the order AOp, BOp, COp");
return emitError("operands must be in the order AOp, BOp, COp");
ArrayRef<int64_t> aShape, bShape, cShape;
aShape = opTypes[A].getShape();
@@ -1133,7 +1119,7 @@ static LogicalResult verify(SubgroupMmaComputeOp op) {
if (aShape[1] != bShape[0] || aShape[0] != cShape[0] ||
bShape[1] != cShape[1])
return op.emitError("operand shapes do not satisfy matmul constraints");
return emitError("operand shapes do not satisfy matmul constraints");
return success();
}


@@ -125,11 +125,11 @@ static void print(OpAsmPrinter &p, ExecuteRegionOp op) {
p.printOptionalAttrDict(op->getAttrs());
}
static LogicalResult verify(ExecuteRegionOp op) {
if (op.getRegion().empty())
return op.emitOpError("region needs to have at least one block");
if (op.getRegion().front().getNumArguments() > 0)
return op.emitOpError("region cannot have any arguments");
LogicalResult ExecuteRegionOp::verify() {
if (getRegion().empty())
return emitOpError("region needs to have at least one block");
if (getRegion().front().getNumArguments() > 0)
return emitOpError("region cannot have any arguments");
return success();
}
@@ -276,47 +276,47 @@ void ForOp::build(OpBuilder &builder, OperationState &result, Value lb,
}
}
static LogicalResult verify(ForOp op) {
if (auto cst = op.getStep().getDefiningOp<arith::ConstantIndexOp>())
LogicalResult ForOp::verify() {
if (auto cst = getStep().getDefiningOp<arith::ConstantIndexOp>())
if (cst.value() <= 0)
return op.emitOpError("constant step operand must be positive");
return emitOpError("constant step operand must be positive");
// Check that the body defines as single block argument for the induction
// variable.
auto *body = op.getBody();
auto *body = getBody();
if (!body->getArgument(0).getType().isIndex())
return op.emitOpError(
return emitOpError(
"expected body first argument to be an index argument for "
"the induction variable");
auto opNumResults = op.getNumResults();
auto opNumResults = getNumResults();
if (opNumResults == 0)
return success();
// If ForOp defines values, check that the number and types of
// the defined values match ForOp initial iter operands and backedge
// basic block arguments.
if (op.getNumIterOperands() != opNumResults)
return op.emitOpError(
if (getNumIterOperands() != opNumResults)
return emitOpError(
"mismatch in number of loop-carried values and defined values");
if (op.getNumRegionIterArgs() != opNumResults)
return op.emitOpError(
if (getNumRegionIterArgs() != opNumResults)
return emitOpError(
"mismatch in number of basic block args and defined values");
auto iterOperands = op.getIterOperands();
auto iterArgs = op.getRegionIterArgs();
auto opResults = op.getResults();
auto iterOperands = getIterOperands();
auto iterArgs = getRegionIterArgs();
auto opResults = getResults();
unsigned i = 0;
for (auto e : llvm::zip(iterOperands, iterArgs, opResults)) {
if (std::get<0>(e).getType() != std::get<2>(e).getType())
return op.emitOpError() << "types mismatch between " << i
<< "th iter operand and defined value";
return emitOpError() << "types mismatch between " << i
<< "th iter operand and defined value";
if (std::get<1>(e).getType() != std::get<2>(e).getType())
return op.emitOpError() << "types mismatch between " << i
<< "th iter region arg and defined value";
return emitOpError() << "types mismatch between " << i
<< "th iter region arg and defined value";
i++;
}
return RegionBranchOpInterface::verifyTypes(op);
return RegionBranchOpInterface::verifyTypes(*this);
}
/// Prints the initialization list in the form of
@@ -1062,11 +1062,11 @@ void IfOp::build(OpBuilder &builder, OperationState &result, Value cond,
build(builder, result, TypeRange(), cond, thenBuilder, elseBuilder);
}
static LogicalResult verify(IfOp op) {
if (op.getNumResults() != 0 && op.getElseRegion().empty())
return op.emitOpError("must have an else block if defining values");
LogicalResult IfOp::verify() {
if (getNumResults() != 0 && getElseRegion().empty())
return emitOpError("must have an else block if defining values");
return RegionBranchOpInterface::verifyTypes(op);
return RegionBranchOpInterface::verifyTypes(*this);
}
static ParseResult parseIfOp(OpAsmParser &parser, OperationState &result) {
@@ -1723,32 +1723,31 @@ void ParallelOp::build(
wrapper);
}
static LogicalResult verify(ParallelOp op) {
LogicalResult ParallelOp::verify() {
// Check that there is at least one value in lowerBound, upperBound and step.
// It is sufficient to test only step, because it is ensured already that the
// number of elements in lowerBound, upperBound and step are the same.
Operation::operand_range stepValues = op.getStep();
Operation::operand_range stepValues = getStep();
if (stepValues.empty())
return op.emitOpError(
return emitOpError(
"needs at least one tuple element for lowerBound, upperBound and step");
// Check whether all constant step values are positive.
for (Value stepValue : stepValues)
if (auto cst = stepValue.getDefiningOp<arith::ConstantIndexOp>())
if (cst.value() <= 0)
return op.emitOpError("constant step operand must be positive");
return emitOpError("constant step operand must be positive");
// Check that the body defines the same number of block arguments as the
// number of tuple elements in step.
Block *body = op.getBody();
Block *body = getBody();
if (body->getNumArguments() != stepValues.size())
return op.emitOpError()
<< "expects the same number of induction variables: "
<< body->getNumArguments()
<< " as bound and step values: " << stepValues.size();
return emitOpError() << "expects the same number of induction variables: "
<< body->getNumArguments()
<< " as bound and step values: " << stepValues.size();
for (auto arg : body->getArguments())
if (!arg.getType().isIndex())
return op.emitOpError(
return emitOpError(
"expects arguments for the induction variable to be of index type");
// Check that the yield has no results
@@ -1759,20 +1758,20 @@ static LogicalResult verify(ParallelOp op) {
// Check that the number of results is the same as the number of ReduceOps.
SmallVector<ReduceOp, 4> reductions(body->getOps<ReduceOp>());
auto resultsSize = op.getResults().size();
auto resultsSize = getResults().size();
auto reductionsSize = reductions.size();
auto initValsSize = op.getInitVals().size();
auto initValsSize = getInitVals().size();
if (resultsSize != reductionsSize)
return op.emitOpError()
<< "expects number of results: " << resultsSize
<< " to be the same as number of reductions: " << reductionsSize;
return emitOpError() << "expects number of results: " << resultsSize
<< " to be the same as number of reductions: "
<< reductionsSize;
if (resultsSize != initValsSize)
return op.emitOpError()
<< "expects number of results: " << resultsSize
<< " to be the same as number of initial values: " << initValsSize;
return emitOpError() << "expects number of results: " << resultsSize
<< " to be the same as number of initial values: "
<< initValsSize;
// Check that the types of the results and reductions are the same.
for (auto resultAndReduce : llvm::zip(op.getResults(), reductions)) {
for (auto resultAndReduce : llvm::zip(getResults(), reductions)) {
auto resultType = std::get<0>(resultAndReduce).getType();
auto reduceOp = std::get<1>(resultAndReduce);
auto reduceType = reduceOp.getOperand().getType();
@@ -2075,23 +2074,23 @@ void ReduceOp::build(
body->getArgument(1));
}
static LogicalResult verify(ReduceOp op) {
LogicalResult ReduceOp::verify() {
// The region of a ReduceOp has two arguments of the same type as its operand.
auto type = op.getOperand().getType();
Block &block = op.getReductionOperator().front();
auto type = getOperand().getType();
Block &block = getReductionOperator().front();
if (block.empty())
return op.emitOpError("the block inside reduce should not be empty");
return emitOpError("the block inside reduce should not be empty");
if (block.getNumArguments() != 2 ||
llvm::any_of(block.getArguments(), [&](const BlockArgument &arg) {
return arg.getType() != type;
}))
return op.emitOpError()
<< "expects two arguments to reduce block of type " << type;
return emitOpError() << "expects two arguments to reduce block of type "
<< type;
// Check that the block is terminated by a ReduceReturnOp.
if (!isa<ReduceReturnOp>(block.getTerminator()))
return op.emitOpError("the block inside reduce should be terminated with a "
"'scf.reduce.return' op");
return emitOpError("the block inside reduce should be terminated with a "
"'scf.reduce.return' op");
return success();
}
@@ -2127,14 +2126,14 @@ static void print(OpAsmPrinter &p, ReduceOp op) {
// ReduceReturnOp
//===----------------------------------------------------------------------===//
static LogicalResult verify(ReduceReturnOp op) {
LogicalResult ReduceReturnOp::verify() {
// The type of the return value should be the same type as the type of the
// operand of the enclosing ReduceOp.
auto reduceOp = cast<ReduceOp>(op->getParentOp());
auto reduceOp = cast<ReduceOp>((*this)->getParentOp());
Type reduceType = reduceOp.getOperand().getType();
if (reduceType != op.getResult().getType())
return op.emitOpError() << "needs to have type " << reduceType
<< " (the type of the enclosing ReduceOp)";
if (reduceType != getResult().getType())
return emitOpError() << "needs to have type " << reduceType
<< " (the type of the enclosing ReduceOp)";
return success();
}
@@ -2278,18 +2277,18 @@ static TerminatorTy verifyAndGetTerminator(scf::WhileOp op, Region &region,
return nullptr;
}
static LogicalResult verify(scf::WhileOp op) {
if (failed(RegionBranchOpInterface::verifyTypes(op)))
LogicalResult scf::WhileOp::verify() {
if (failed(RegionBranchOpInterface::verifyTypes(*this)))
return failure();
auto beforeTerminator = verifyAndGetTerminator<scf::ConditionOp>(
op, op.getBefore(),
*this, getBefore(),
"expects the 'before' region to terminate with 'scf.condition'");
if (!beforeTerminator)
return failure();
auto afterTerminator = verifyAndGetTerminator<scf::YieldOp>(
op, op.getAfter(),
*this, getAfter(),
"expects the 'after' region to terminate with 'scf.yield'");
return success(afterTerminator != nullptr);
}