[mlir] NFC: Rename LoopOps dialect to SCF (Structured Control Flow)

This dialect contains various structured control flow operations, not only
loops; reflect this in the name. Drop the 'Ops' suffix for consistency with
other dialects.

Note that this only moves the files and changes the C++ namespace from 'loop'
to 'scf'. The visible IR prefix remains the same and will be updated
separately. The conversions will also be updated separately.

Differential Revision: https://reviews.llvm.org/D79578
Alex Zinenko 2020-05-11 15:00:48 +02:00
parent 3bc9525731
commit c25b20c0f6
69 changed files with 277 additions and 280 deletions
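
For downstream C++ code the update is mechanical: the include path and the
C++ namespace change, the builder API does not. A minimal before/after
sketch, assuming a hypothetical helper that builds a single loop (all
identifiers are taken from the diffs below):

  #include "mlir/Dialect/SCF/SCF.h" // was "mlir/Dialect/LoopOps/LoopOps.h"
  #include "mlir/IR/Builders.h"

  // Hypothetical helper: 'loop::ForOp' becomes 'scf::ForOp'; the arguments
  // and the rest of the builder API are unchanged.
  static void buildSingleLoop(mlir::OpBuilder &builder, mlir::Location loc,
                              mlir::Value lowerBound, mlir::Value upperBound,
                              mlir::Value step) {
    auto loop =
        builder.create<mlir::scf::ForOp>(loc, lowerBound, upperBound, step);
    // The op still prints as "loop.for"; only the C++ spelling changes here.
    builder.setInsertionPointToStart(loop.getBody());
  }

Dialect registration and conversion targets change the same way:
registerDialect<scf::SCFDialect>() and target.addLegalDialect<scf::SCFDialect>()
replace the loop::LoopOpsDialect spellings, as the diffs below show.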


@ -21,7 +21,7 @@
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
@ -68,7 +68,7 @@ public:
auto upperBound = rewriter.create<ConstantIndexOp>(loc, memRefShape[i]);
auto step = rewriter.create<ConstantIndexOp>(loc, 1);
auto loop =
rewriter.create<loop::ForOp>(loc, lowerBound, upperBound, step);
rewriter.create<scf::ForOp>(loc, lowerBound, upperBound, step);
loop.getBody()->clear();
loopIvs.push_back(loop.getInductionVar());
@ -79,7 +79,7 @@ public:
if (i != e - 1)
rewriter.create<CallOp>(loc, printfRef, rewriter.getIntegerType(32),
newLineCst);
rewriter.create<loop::YieldOp>(loc);
rewriter.create<scf::YieldOp>(loc);
rewriter.setInsertionPointToStart(loop.getBody());
}


@ -21,7 +21,7 @@
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/DialectConversion.h"
@ -68,7 +68,7 @@ public:
auto upperBound = rewriter.create<ConstantIndexOp>(loc, memRefShape[i]);
auto step = rewriter.create<ConstantIndexOp>(loc, 1);
auto loop =
rewriter.create<loop::ForOp>(loc, lowerBound, upperBound, step);
rewriter.create<scf::ForOp>(loc, lowerBound, upperBound, step);
loop.getBody()->clear();
loopIvs.push_back(loop.getInductionVar());
@ -79,7 +79,7 @@ public:
if (i != e - 1)
rewriter.create<CallOp>(loc, printfRef, rewriter.getIntegerType(32),
newLineCst);
rewriter.create<loop::YieldOp>(loc);
rewriter.create<scf::YieldOp>(loc);
rewriter.setInsertionPointToStart(loop.getBody());
}


@ -17,9 +17,9 @@ class OwningRewritePatternList;
struct LogicalResult;
class Value;
namespace loop {
namespace scf {
class ForOp;
} // end namespace loop
} // end namespace scf
/// Convert a perfect affine loop nest with the outermost loop identified by
/// `forOp` into a gpu::Launch operation. Map `numBlockDims` outer loops to
@ -45,7 +45,7 @@ LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
/// parallelization is performed, it is under the responsibility of the caller
/// to strip-mine the loops and to perform the dependence analysis before
/// calling the conversion.
LogicalResult convertLoopNestToGPULaunch(loop::ForOp forOp,
LogicalResult convertLoopNestToGPULaunch(scf::ForOp forOp,
unsigned numBlockDims,
unsigned numThreadDims);
@ -70,7 +70,7 @@ LogicalResult convertLoopNestToGPULaunch(loop::ForOp forOp,
/// TODO(ravishankarm) : Add checks that verify 2(b) above.
/// The above conditions are assumed to be satisfied by the computation rooted
/// at `forOp`.
LogicalResult convertLoopToGPULaunch(loop::ForOp forOp,
LogicalResult convertLoopToGPULaunch(scf::ForOp forOp,
ArrayRef<Value> numWorkGroups,
ArrayRef<Value> workGroupSizes);


@ -176,7 +176,7 @@ def ConvertSimpleLoopsToGPU : FunctionPass<"convert-loops-to-gpu"> {
}
def ConvertLoopsToGPU : FunctionPass<"convert-loop-op-to-gpu"> {
let summary = "Convert top-level loop::ForOp to GPU kernels";
let summary = "Convert top-level scf::ForOp to GPU kernels";
let constructor = "mlir::createLoopToGPUPass()";
let options = [
ListOption<"numWorkGroups", "gpu-num-workgroups", "int64_t",


@ -3,9 +3,9 @@ add_subdirectory(AVX512)
add_subdirectory(GPU)
add_subdirectory(Linalg)
add_subdirectory(LLVMIR)
add_subdirectory(LoopOps)
add_subdirectory(OpenMP)
add_subdirectory(Quant)
add_subdirectory(SCF)
add_subdirectory(Shape)
add_subdirectory(SPIRV)
add_subdirectory(StandardOps)


@ -29,7 +29,7 @@ class Region;
#include "mlir/Dialect/GPU/ParallelLoopMapperAttr.h.inc"
namespace loop {
namespace scf {
class ParallelOp;
}
@ -54,7 +54,7 @@ ParallelLoopDimMapping getParallelLoopDimMappingAttr(Processor processor,
/// - the number of DimMapperAttr provided is same as the number of loops of
/// the `ploopOp`.
/// - the mapping does not map multiple loops to the same processor.
LogicalResult setMappingAttr(loop::ParallelOp ploopOp,
LogicalResult setMappingAttr(scf::ParallelOp ploopOp,
ArrayRef<ParallelLoopDimMapping> mapping);
} // end namespace gpu


@ -26,9 +26,9 @@ class AffineForOp;
class BlockArgument;
class SubViewOp;
namespace loop {
namespace scf {
class ParallelOp;
} // namespace loop
} // namespace scf
namespace edsc {
class AffineLoopNestBuilder;
@ -85,7 +85,7 @@ private:
typename std::conditional_t<std::is_same<LoopTy, AffineForOp>::value,
AffineLoopNestBuilder, LoopNestRangeBuilder>;
using BuilderType =
typename std::conditional_t<std::is_same<LoopTy, loop::ParallelOp>::value,
typename std::conditional_t<std::is_same<LoopTy, scf::ParallelOp>::value,
ParallelLoopNestBuilder,
LoopOrAffineLoopBuilder>;


@ -10,7 +10,7 @@
#define MLIR_DIALECT_LINALG_UTILS_H_
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "llvm/ADT/SetVector.h"


@ -1,9 +0,0 @@
add_mlir_dialect(LoopOps loop)
add_mlir_doc(LoopOps -gen-dialect-doc LoopDialect Dialects/)
set(LLVM_TARGET_DEFINITIONS Passes.td)
mlir_tablegen(Passes.h.inc -gen-pass-decls)
add_public_tablegen_target(MLIRLoopPassIncGen)
add_dependencies(mlir-headers MLIRLoopPassIncGen)
add_mlir_doc(Passes -gen-pass-doc LoopPasses ./)


@ -0,0 +1,9 @@
add_mlir_dialect(SCFOps loop Ops)
add_mlir_doc(SCFOps -gen-dialect-doc SCFDialect Dialects/)
set(LLVM_TARGET_DEFINITIONS Passes.td)
mlir_tablegen(Passes.h.inc -gen-pass-decls)
add_public_tablegen_target(MLIRSCFPassIncGen)
add_dependencies(mlir-headers MLIRSCFPassIncGen)
add_mlir_doc(Passes -gen-pass-doc SCFPasses ./)


@ -11,10 +11,10 @@
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_DIALECT_LOOPOPS_EDSC_BUILDERS_H_
#define MLIR_DIALECT_LOOPOPS_EDSC_BUILDERS_H_
#ifndef MLIR_DIALECT_SCF_EDSC_BUILDERS_H_
#define MLIR_DIALECT_SCF_EDSC_BUILDERS_H_
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/EDSC/Builders.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/Types.h"
@ -22,13 +22,13 @@
namespace mlir {
namespace edsc {
/// Constructs a new loop::ParallelOp and captures the associated induction
/// Constructs a new scf::ParallelOp and captures the associated induction
/// variables. An array of Value pointers is passed as the first
/// argument and is the *only* way to capture loop induction variables.
LoopBuilder makeParallelLoopBuilder(MutableArrayRef<Value> ivs,
ArrayRef<Value> lbs, ArrayRef<Value> ubs,
ArrayRef<Value> steps);
/// Constructs a new loop::ForOp and captures the associated induction
/// Constructs a new scf::ForOp and captures the associated induction
/// variable. A Value pointer is passed as the first argument and is the
/// *only* way to capture the loop induction variable.
LoopBuilder makeLoopBuilder(Value *iv, Value lb, Value ub, Value step,
@ -74,4 +74,4 @@ private:
} // namespace edsc
} // namespace mlir
#endif // MLIR_DIALECT_LOOPOPS_EDSC_BUILDERS_H_
#endif // MLIR_DIALECT_SCF_EDSC_BUILDERS_H_


@ -1,4 +1,4 @@
//===- Intrinsics.h - MLIR EDSC Intrinsics for Linalg -----------*- C++ -*-===//
//===- Intrinsics.h - MLIR EDSC Intrinsics for SCF --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM
// Exceptions.
@ -6,19 +6,19 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_DIALECT_LOOPOPS_EDSC_INTRINSICS_H_
#define MLIR_DIALECT_LOOPOPS_EDSC_INTRINSICS_H_
#ifndef MLIR_DIALECT_SCF_EDSC_INTRINSICS_H_
#define MLIR_DIALECT_SCF_EDSC_INTRINSICS_H_
#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
namespace mlir {
namespace edsc {
namespace intrinsics {
using loop_yield = OperationBuilder<loop::YieldOp>;
using loop_yield = OperationBuilder<scf::YieldOp>;
} // namespace intrinsics
} // namespace edsc
} // namespace mlir
#endif // MLIR_DIALECT_LOOPOPS_EDSC_INTRINSICS_H_
#endif // MLIR_DIALECT_SCF_EDSC_INTRINSICS_H_


@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_DIALECT_LOOPOPS_PASSES_H_
#define MLIR_DIALECT_LOOPOPS_PASSES_H_
#ifndef MLIR_DIALECT_SCF_PASSES_H_
#define MLIR_DIALECT_SCF_PASSES_H_
#include "llvm/ADT/ArrayRef.h"
#include <memory>
@ -33,4 +33,4 @@ createParallelLoopTilingPass(llvm::ArrayRef<int64_t> tileSize = {});
} // namespace mlir
#endif // MLIR_DIALECT_LOOPOPS_PASSES_H_
#endif // MLIR_DIALECT_SCF_PASSES_H_


@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_DIALECT_LOOP_PASSES
#define MLIR_DIALECT_LOOP_PASSES
#ifndef MLIR_DIALECT_SCF_PASSES
#define MLIR_DIALECT_SCF_PASSES
include "mlir/Pass/PassBase.td"
@ -32,4 +32,4 @@ def LoopParallelLoopTiling : FunctionPass<"parallel-loop-tiling"> {
];
}
#endif // MLIR_DIALECT_LOOP_PASSES
#endif // MLIR_DIALECT_SCF_PASSES


@ -1,4 +1,4 @@
//===- Ops.h - Loop MLIR Operations -----------------------------*- C++ -*-===//
//===- SCFOps.h - Structured Control Flow -----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
//
// This file defines convenience types for working with loop operations.
// This file defines structured control flow operations.
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_LOOPOPS_OPS_H_
#define MLIR_LOOPOPS_OPS_H_
#ifndef MLIR_DIALECT_SCF_H_
#define MLIR_DIALECT_SCF_H_
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
@ -22,12 +22,12 @@
#include "mlir/Interfaces/SideEffects.h"
namespace mlir {
namespace loop {
namespace scf {
#include "mlir/Dialect/LoopOps/LoopOpsDialect.h.inc"
#include "mlir/Dialect/SCF/SCFOpsDialect.h.inc"
#define GET_OP_CLASSES
#include "mlir/Dialect/LoopOps/LoopOps.h.inc"
#include "mlir/Dialect/SCF/SCFOps.h.inc"
// Insert `loop.terminator` at the end of the only region's only block if it
// does not have a terminator already. If a new `loop.terminator` is inserted,
@ -43,6 +43,6 @@ ForOp getForInductionVarOwner(Value val);
// value is not an induction variable, then return nullptr.
ParallelOp getParallelForInductionVarOwner(Value val);
} // end namespace loop
} // end namespace scf
} // end namespace mlir
#endif // MLIR_LOOPOPS_OPS_H_
#endif // MLIR_DIALECT_SCF_H_


@ -1,4 +1,4 @@
//===- Ops.td - Loop operation definitions ---------------*- tablegen -*-===//
//===- SCFOps.td - Structured Control Flow operations ------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -6,25 +6,25 @@
//
//===----------------------------------------------------------------------===//
//
// Defines MLIR loop operations.
// Defines MLIR structured control flow operations.
//
//===----------------------------------------------------------------------===//
#ifndef LOOP_OPS
#define LOOP_OPS
#ifndef MLIR_DIALECT_SCF_SCFOPS
#define MLIR_DIALECT_SCF_SCFOPS
include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/Interfaces/LoopLikeInterface.td"
include "mlir/Interfaces/SideEffects.td"
def LoopOps_Dialect : Dialect {
def SCF_Dialect : Dialect {
let name = "loop";
let cppNamespace = "";
}
// Base class for Loop dialect ops.
class Loop_Op<string mnemonic, list<OpTrait> traits = []> :
Op<LoopOps_Dialect, mnemonic, traits> {
// Base class for SCF dialect ops.
class SCF_Op<string mnemonic, list<OpTrait> traits = []> :
Op<SCF_Dialect, mnemonic, traits> {
// For every standard op, there needs to be a:
// * void print(OpAsmPrinter &p, ${C++ class of Op} op)
// * LogicalResult verify(${C++ class of Op} op)
@ -36,7 +36,7 @@ class Loop_Op<string mnemonic, list<OpTrait> traits = []> :
let parser = [{ return ::parse$cppClass(parser, result); }];
}
def ForOp : Loop_Op<"for",
def ForOp : SCF_Op<"for",
[DeclareOpInterfaceMethods<LoopLikeOpInterface>,
DeclareOpInterfaceMethods<RegionBranchOpInterface>,
SingleBlockImplicitTerminator<"YieldOp">,
@ -176,7 +176,7 @@ def ForOp : Loop_Op<"for",
}];
}
def IfOp : Loop_Op<"if",
def IfOp : SCF_Op<"if",
[DeclareOpInterfaceMethods<RegionBranchOpInterface>,
SingleBlockImplicitTerminator<"YieldOp">, RecursiveSideEffects]> {
let summary = "if-then-else operation";
@ -250,7 +250,7 @@ def IfOp : Loop_Op<"if",
}];
}
def ParallelOp : Loop_Op<"parallel",
def ParallelOp : SCF_Op<"parallel",
[AttrSizedOperandSegments,
DeclareOpInterfaceMethods<LoopLikeOpInterface>,
RecursiveSideEffects,
@ -324,7 +324,7 @@ def ParallelOp : Loop_Op<"parallel",
}];
}
def ReduceOp : Loop_Op<"reduce", [HasParent<"ParallelOp">]> {
def ReduceOp : SCF_Op<"reduce", [HasParent<"ParallelOp">]> {
let summary = "reduce operation for parallel for";
let description = [{
"loop.reduce" is an operation occurring inside "loop.parallel" operations.
@ -372,7 +372,7 @@ def ReduceOp : Loop_Op<"reduce", [HasParent<"ParallelOp">]> {
}
def ReduceReturnOp :
Loop_Op<"reduce.return", [HasParent<"ReduceOp">, NoSideEffect,
SCF_Op<"reduce.return", [HasParent<"ReduceOp">, NoSideEffect,
Terminator]> {
let summary = "terminator for reduce operation";
let description = [{
@ -389,7 +389,7 @@ def ReduceReturnOp :
let assemblyFormat = "$result attr-dict `:` type($result)";
}
def YieldOp : Loop_Op<"yield", [NoSideEffect, ReturnLike, Terminator]> {
def YieldOp : SCF_Op<"yield", [NoSideEffect, ReturnLike, Terminator]> {
let summary = "loop yield and termination operation";
let description = [{
"loop.yield" yields an SSA value from a loop dialect op region and
@ -409,4 +409,4 @@ def YieldOp : Loop_Op<"yield", [NoSideEffect, ReturnLike, Terminator]> {
[{ /* nothing to do */ }]>
];
}
#endif // LOOP_OPS
#endif // MLIR_DIALECT_SCF_SCFOPS


@ -1,4 +1,4 @@
//===- Transforms.h - Pass Entrypoints --------------------------*- C++ -*-===//
//===- Transforms.h - SCF dialect transformation utilities ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -6,12 +6,12 @@
//
//===----------------------------------------------------------------------===//
//
// This header file defines transformations on loop operations.
// This header file defines transformations on SCF operations.
//
//===----------------------------------------------------------------------===//
#ifndef MLIR_DIALECT_LOOPOPS_TRANSFORMS_H_
#define MLIR_DIALECT_LOOPOPS_TRANSFORMS_H_
#ifndef MLIR_DIALECT_SCF_TRANSFORMS_H_
#define MLIR_DIALECT_SCF_TRANSFORMS_H_
#include "llvm/ADT/ArrayRef.h"
@ -19,7 +19,7 @@ namespace mlir {
class Region;
namespace loop {
namespace scf {
class ParallelOp;
@ -42,7 +42,7 @@ void naivelyFuseParallelOps(Region &region);
/// The old loop is replaced with the new one.
void tileParallelLoop(ParallelOp op, llvm::ArrayRef<int64_t> tileSizes);
} // namespace loop
} // namespace scf
} // namespace mlir
#endif // MLIR_DIALECT_LOOPOPS_TRANSFORMS_H_
#endif // MLIR_DIALECT_SCF_TRANSFORMS_H_


@ -22,9 +22,9 @@
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/LLVMIR/ROCDLDialect.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/Dialect/Quant/QuantOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SDBM/SDBMDialect.h"
#include "mlir/Dialect/SPIRV/SPIRVDialect.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
@ -44,7 +44,7 @@ inline void registerAllDialects() {
registerDialect<LLVM::LLVMAVX512Dialect>();
registerDialect<LLVM::LLVMDialect>();
registerDialect<linalg::LinalgDialect>();
registerDialect<loop::LoopOpsDialect>();
registerDialect<scf::SCFDialect>();
registerDialect<omp::OpenMPDialect>();
registerDialect<quant::QuantizationDialect>();
registerDialect<spirv::SPIRVDialect>();


@ -31,8 +31,8 @@
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/LLVMIR/Transforms/LegalizeForExport.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/LoopOps/Passes.h"
#include "mlir/Dialect/Quant/Passes.h"
#include "mlir/Dialect/SCF/Passes.h"
#include "mlir/Dialect/SPIRV/Passes.h"
#include "mlir/Dialect/StandardOps/Transforms/Passes.h"
#include "mlir/Transforms/LocationSnapshot.h"
@ -78,7 +78,7 @@ inline void registerAllPasses() {
// Loop
#define GEN_PASS_REGISTRATION
#include "mlir/Dialect/LoopOps/Passes.h.inc"
#include "mlir/Dialect/SCF/Passes.h.inc"
// Quant
#define GEN_PASS_REGISTRATION


@ -27,10 +27,10 @@ class Value;
class ValueRange;
struct MemRefRegion;
namespace loop {
namespace scf {
class ForOp;
class ParallelOp;
} // end namespace loop
} // end namespace scf
/// Unrolls this for operation completely if the trip count is known to be
/// constant. Returns failure otherwise.
@ -40,7 +40,7 @@ LogicalResult loopUnrollFull(AffineForOp forOp);
/// if the loop cannot be unrolled either due to restrictions or due to invalid
/// unroll factors. Requires positive loop bounds and step.
LogicalResult loopUnrollByFactor(AffineForOp forOp, uint64_t unrollFactor);
LogicalResult loopUnrollByFactor(loop::ForOp forOp, uint64_t unrollFactor);
LogicalResult loopUnrollByFactor(scf::ForOp forOp, uint64_t unrollFactor);
/// Unrolls this loop by the specified unroll factor or its trip count,
/// whichever is lower.
@ -56,8 +56,8 @@ bool LLVM_ATTRIBUTE_UNUSED isPerfectlyNested(ArrayRef<AffineForOp> loops);
/// AffineForOp, and the second op is a terminator).
void getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
AffineForOp root);
void getPerfectlyNestedLoops(SmallVectorImpl<loop::ForOp> &nestedLoops,
loop::ForOp root);
void getPerfectlyNestedLoops(SmallVectorImpl<scf::ForOp> &nestedLoops,
scf::ForOp root);
/// Unrolls and jams this loop by the specified factor. Returns success if the
/// loop is successfully unroll-jammed.
@ -69,10 +69,10 @@ LogicalResult loopUnrollJamByFactor(AffineForOp forOp,
LogicalResult loopUnrollJamUpToFactor(AffineForOp forOp,
uint64_t unrollJamFactor);
/// Promotes the loop body of a AffineForOp/loop::ForOp to its containing block
/// Promotes the loop body of a AffineForOp/scf::ForOp to its containing block
/// if the loop was known to have a single iteration.
LogicalResult promoteIfSingleIteration(AffineForOp forOp);
LogicalResult promoteIfSingleIteration(loop::ForOp forOp);
LogicalResult promoteIfSingleIteration(scf::ForOp forOp);
/// Promotes all single iteration AffineForOp's in the Function, i.e., moves
/// their body into the containing Block.
@ -128,13 +128,13 @@ AffineForOp sinkSequentialLoops(AffineForOp forOp);
/// occurrence in `forOps`, under each of the `targets`.
/// Returns the new AffineForOps, one per each of (`forOps`, `targets`) pair,
/// nested immediately under each of `targets`.
using Loops = SmallVector<loop::ForOp, 8>;
using Loops = SmallVector<scf::ForOp, 8>;
using TileLoops = std::pair<Loops, Loops>;
SmallVector<SmallVector<AffineForOp, 8>, 8> tile(ArrayRef<AffineForOp> forOps,
ArrayRef<uint64_t> sizes,
ArrayRef<AffineForOp> targets);
SmallVector<Loops, 8> tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value> sizes,
ArrayRef<loop::ForOp> targets);
SmallVector<Loops, 8> tile(ArrayRef<scf::ForOp> forOps, ArrayRef<Value> sizes,
ArrayRef<scf::ForOp> targets);
/// Performs tiling (with interchange) by strip-mining the `forOps` by `sizes`
/// and sinking them, in their order of occurrence in `forOps`, under `target`.
@ -142,15 +142,15 @@ SmallVector<Loops, 8> tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value> sizes,
/// `target`.
SmallVector<AffineForOp, 8> tile(ArrayRef<AffineForOp> forOps,
ArrayRef<uint64_t> sizes, AffineForOp target);
Loops tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value> sizes,
loop::ForOp target);
Loops tile(ArrayRef<scf::ForOp> forOps, ArrayRef<Value> sizes,
scf::ForOp target);
/// Tile a nest of loop::ForOp loops rooted at `rootForOp` with the given
/// Tile a nest of scf::ForOp loops rooted at `rootForOp` with the given
/// (parametric) sizes. Sizes are expected to be strictly positive values at
/// runtime. If more sizes than loops are provided, discard the trailing values
/// in sizes. Assumes the loop nest is permutable.
/// Returns the newly created intra-tile loops.
Loops tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef<Value> sizes);
Loops tilePerfectlyNested(scf::ForOp rootForOp, ArrayRef<Value> sizes);
/// Explicit copy / DMA generation options for mlir::affineDataCopyGenerate.
struct AffineCopyOptions {
@ -220,18 +220,17 @@ LogicalResult generateCopyForMemRegion(const MemRefRegion &memrefRegion,
/// Tile a nest of standard for loops rooted at `rootForOp` by finding such
/// parametric tile sizes that the outer loops have a fixed number of iterations
/// as defined in `sizes`.
TileLoops extractFixedOuterLoops(loop::ForOp rootFOrOp,
ArrayRef<int64_t> sizes);
TileLoops extractFixedOuterLoops(scf::ForOp rootFOrOp, ArrayRef<int64_t> sizes);
/// Replace a perfect nest of "for" loops with a single linearized loop. Assumes
/// `loops` contains a list of perfectly nested loops with bounds and steps
/// independent of any loop induction variable involved in the nest.
void coalesceLoops(MutableArrayRef<loop::ForOp> loops);
void coalesceLoops(MutableArrayRef<scf::ForOp> loops);
/// Take the ParallelLoop and for each set of dimension indices, combine them
/// into a single dimension. combinedDimensions must contain each index into
/// loops exactly once.
void collapseParallelLoops(loop::ParallelOp loops,
void collapseParallelLoops(scf::ParallelOp loops,
ArrayRef<std::vector<unsigned>> combinedDimensions);
/// Maps `forOp` for execution on a parallel grid of virtual `processorIds` of
@ -265,7 +264,7 @@ void collapseParallelLoops(loop::ParallelOp loops,
/// ...
/// }
/// ```
void mapLoopToProcessorIds(loop::ForOp forOp, ArrayRef<Value> processorId,
void mapLoopToProcessorIds(scf::ForOp forOp, ArrayRef<Value> processorId,
ArrayRef<Value> numProcessors);
/// Gathers all AffineForOps in 'func' grouped by loop depth.


@ -22,7 +22,7 @@ add_mlir_library(MLIRAnalysis
MLIRCallInterfaces
MLIRControlFlowInterfaces
MLIRInferTypeOpInterface
MLIRLoopOps
MLIRSCF
)
add_mlir_library(MLIRLoopAnalysis
@ -40,5 +40,5 @@ add_mlir_library(MLIRLoopAnalysis
MLIRCallInterfaces
MLIRControlFlowInterfaces
MLIRInferTypeOpInterface
MLIRLoopOps
MLIRSCF
)


@ -12,7 +12,7 @@
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/IR/Function.h"
#include "mlir/IR/Operation.h"
#include "mlir/Support/LLVM.h"
@ -44,7 +44,7 @@ static void getForwardSliceImpl(Operation *op,
for (auto *ownerInst : forOp.getInductionVar().getUsers())
if (forwardSlice->count(ownerInst) == 0)
getForwardSliceImpl(ownerInst, forwardSlice, filter);
} else if (auto forOp = dyn_cast<loop::ForOp>(op)) {
} else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
for (auto *ownerInst : forOp.getInductionVar().getUsers())
if (forwardSlice->count(ownerInst) == 0)
getForwardSliceImpl(ownerInst, forwardSlice, filter);
@ -82,7 +82,7 @@ static void getBackwardSliceImpl(Operation *op,
return;
assert((op->getNumRegions() == 0 || isa<AffineForOp>(op) ||
isa<loop::ForOp>(op)) &&
isa<scf::ForOp>(op)) &&
"unexpected generic op with regions");
// Evaluate whether we should keep this def.
@ -99,7 +99,7 @@ static void getBackwardSliceImpl(Operation *op,
auto *affOp = affIv.getOperation();
if (backwardSlice->count(affOp) == 0)
getBackwardSliceImpl(affOp, backwardSlice, filter);
} else if (auto loopIv = loop::getForInductionVarOwner(operand)) {
} else if (auto loopIv = scf::getForInductionVarOwner(operand)) {
auto *loopOp = loopIv.getOperation();
if (backwardSlice->count(loopOp) == 0)
getBackwardSliceImpl(loopOp, backwardSlice, filter);


@ -15,7 +15,7 @@
#include "../PassDetail.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/BlockAndValueMapping.h"
@ -332,7 +332,7 @@ public:
LogicalResult matchAndRewrite(AffineTerminatorOp op,
PatternRewriter &rewriter) const override {
rewriter.replaceOpWithNewOp<loop::YieldOp>(op);
rewriter.replaceOpWithNewOp<scf::YieldOp>(op);
return success();
}
};
@ -347,7 +347,7 @@ public:
Value lowerBound = lowerAffineLowerBound(op, rewriter);
Value upperBound = lowerAffineUpperBound(op, rewriter);
Value step = rewriter.create<ConstantIndexOp>(loc, op.getStep());
auto f = rewriter.create<loop::ForOp>(loc, lowerBound, upperBound, step);
auto f = rewriter.create<scf::ForOp>(loc, lowerBound, upperBound, step);
f.region().getBlocks().clear();
rewriter.inlineRegionBefore(op.region(), f.region(), f.region().end());
rewriter.eraseOp(op);
@ -392,7 +392,7 @@ public:
: rewriter.create<ConstantIntOp>(loc, /*value=*/1, /*width=*/1);
bool hasElseRegion = !op.elseRegion().empty();
auto ifOp = rewriter.create<loop::IfOp>(loc, cond, hasElseRegion);
auto ifOp = rewriter.create<scf::IfOp>(loc, cond, hasElseRegion);
rewriter.inlineRegionBefore(op.thenRegion(), &ifOp.thenRegion().back());
ifOp.thenRegion().back().erase();
if (hasElseRegion) {
@ -582,7 +582,7 @@ class LowerAffinePass : public ConvertAffineToStandardBase<LowerAffinePass> {
OwningRewritePatternList patterns;
populateAffineToStdConversionPatterns(patterns, &getContext());
ConversionTarget target(getContext());
target.addLegalDialect<loop::LoopOpsDialect, StandardOpsDialect>();
target.addLegalDialect<scf::SCFDialect, StandardOpsDialect>();
if (failed(applyPartialConversion(getFunction(), target, patterns)))
signalPassFailure();
}


@ -12,7 +12,7 @@ add_mlir_conversion_library(MLIRAffineToStandard
LINK_LIBS PUBLIC
MLIRAffineOps
MLIRLoopOps
MLIRSCF
MLIRPass
MLIRStandardOps
MLIRTransforms


@ -11,7 +11,7 @@
//===----------------------------------------------------------------------===//
#include "mlir/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SPIRV/SPIRVDialect.h"
#include "mlir/Dialect/SPIRV/SPIRVLowering.h"
#include "mlir/Dialect/SPIRV/SPIRVOps.h"
@ -21,34 +21,34 @@ using namespace mlir;
namespace {
/// Pattern to convert a loop::ForOp within kernel functions into spirv::LoopOp.
class ForOpConversion final : public SPIRVOpLowering<loop::ForOp> {
/// Pattern to convert a scf::ForOp within kernel functions into spirv::LoopOp.
class ForOpConversion final : public SPIRVOpLowering<scf::ForOp> {
public:
using SPIRVOpLowering<loop::ForOp>::SPIRVOpLowering;
using SPIRVOpLowering<scf::ForOp>::SPIRVOpLowering;
LogicalResult
matchAndRewrite(loop::ForOp forOp, ArrayRef<Value> operands,
matchAndRewrite(scf::ForOp forOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override;
};
/// Pattern to convert a loop::IfOp within kernel functions into
/// Pattern to convert a scf::IfOp within kernel functions into
/// spirv::SelectionOp.
class IfOpConversion final : public SPIRVOpLowering<loop::IfOp> {
class IfOpConversion final : public SPIRVOpLowering<scf::IfOp> {
public:
using SPIRVOpLowering<loop::IfOp>::SPIRVOpLowering;
using SPIRVOpLowering<scf::IfOp>::SPIRVOpLowering;
LogicalResult
matchAndRewrite(loop::IfOp IfOp, ArrayRef<Value> operands,
matchAndRewrite(scf::IfOp IfOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override;
};
/// Pattern to erase a loop::YieldOp.
class TerminatorOpConversion final : public SPIRVOpLowering<loop::YieldOp> {
/// Pattern to erase a scf::YieldOp.
class TerminatorOpConversion final : public SPIRVOpLowering<scf::YieldOp> {
public:
using SPIRVOpLowering<loop::YieldOp>::SPIRVOpLowering;
using SPIRVOpLowering<scf::YieldOp>::SPIRVOpLowering;
LogicalResult
matchAndRewrite(loop::YieldOp terminatorOp, ArrayRef<Value> operands,
matchAndRewrite(scf::YieldOp terminatorOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const override {
rewriter.eraseOp(terminatorOp);
return success();
@ -117,18 +117,18 @@ public:
} // namespace
//===----------------------------------------------------------------------===//
// loop::ForOp.
// scf::ForOp.
//===----------------------------------------------------------------------===//
LogicalResult
ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef<Value> operands,
ForOpConversion::matchAndRewrite(scf::ForOp forOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const {
// loop::ForOp can be lowered to the structured control flow represented by
// scf::ForOp can be lowered to the structured control flow represented by
// spirv::LoopOp by making the continue block of the spirv::LoopOp the loop
// latch and the merge block the exit block. The resulting spirv::LoopOp has a
// single back edge from the continue to header block, and a single exit from
// header to merge.
loop::ForOpOperandAdaptor forOperands(operands);
scf::ForOpOperandAdaptor forOperands(operands);
auto loc = forOp.getLoc();
auto loopControl = rewriter.getI32IntegerAttr(
static_cast<uint32_t>(spirv::LoopControl::None));
@ -190,16 +190,16 @@ ForOpConversion::matchAndRewrite(loop::ForOp forOp, ArrayRef<Value> operands,
}
//===----------------------------------------------------------------------===//
// loop::IfOp.
// scf::IfOp.
//===----------------------------------------------------------------------===//
LogicalResult
IfOpConversion::matchAndRewrite(loop::IfOp ifOp, ArrayRef<Value> operands,
IfOpConversion::matchAndRewrite(scf::IfOp ifOp, ArrayRef<Value> operands,
ConversionPatternRewriter &rewriter) const {
// When lowering `loop::IfOp` we explicitly create a selection header block
// When lowering `scf::IfOp` we explicitly create a selection header block
// before the control flow diverges and a merge block where control flow
// subsequently converges.
loop::IfOpOperandAdaptor ifOperands(operands);
scf::IfOpOperandAdaptor ifOperands(operands);
auto loc = ifOp.getLoc();
// Create `spv.selection` operation, selection header block and merge block.


@ -16,7 +16,7 @@
#include "mlir/Conversion/GPUToSPIRV/ConvertGPUToSPIRV.h"
#include "mlir/Conversion/StandardToSPIRV/ConvertStandardToSPIRV.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SPIRV/SPIRVDialect.h"
#include "mlir/Dialect/SPIRV/SPIRVLowering.h"
#include "mlir/Dialect/SPIRV/SPIRVOps.h"


@ -11,6 +11,6 @@ add_mlir_conversion_library(MLIRLoopToStandard
Core
LINK_LIBS PUBLIC
MLIRLoopOps
MLIRSCF
MLIRTransforms
)


@ -13,7 +13,7 @@
#include "../PassDetail.h"
#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
@ -25,7 +25,7 @@
#include "mlir/Transforms/Utils.h"
using namespace mlir;
using namespace mlir::loop;
using namespace mlir::scf;
namespace {
@ -195,10 +195,10 @@ struct IfLowering : public OpRewritePattern<IfOp> {
PatternRewriter &rewriter) const override;
};
struct ParallelLowering : public OpRewritePattern<mlir::loop::ParallelOp> {
using OpRewritePattern<mlir::loop::ParallelOp>::OpRewritePattern;
struct ParallelLowering : public OpRewritePattern<mlir::scf::ParallelOp> {
using OpRewritePattern<mlir::scf::ParallelOp>::OpRewritePattern;
LogicalResult matchAndRewrite(mlir::loop::ParallelOp parallelOp,
LogicalResult matchAndRewrite(mlir::scf::ParallelOp parallelOp,
PatternRewriter &rewriter) const override;
};
} // namespace


@ -18,7 +18,7 @@
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/ParallelLoopMapper.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BlockAndValueMapping.h"
@ -34,7 +34,7 @@
#define DEBUG_TYPE "loops-to-gpu"
using namespace mlir;
using namespace mlir::loop;
using namespace mlir::scf;
using llvm::seq;
@ -486,7 +486,7 @@ LogicalResult mlir::convertLoopNestToGPULaunch(ForOp forOp,
return ::convertLoopNestToGPULaunch(forOp, numBlockDims, numThreadDims);
}
LogicalResult mlir::convertLoopToGPULaunch(loop::ForOp forOp,
LogicalResult mlir::convertLoopToGPULaunch(scf::ForOp forOp,
ArrayRef<Value> numWorkGroups,
ArrayRef<Value> workGroupSizes) {
return ::convertLoopToGPULaunch(forOp, numWorkGroups, workGroupSizes);
@ -704,7 +704,7 @@ static LogicalResult processParallelLoop(
CmpIOp pred = rewriter.create<CmpIOp>(
loc, CmpIPredicate::slt, newIndex,
cloningMap.lookupOrDefault(originalBound));
loop::IfOp ifOp = rewriter.create<loop::IfOp>(loc, pred, false);
scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, pred, false);
rewriter.setInsertionPointToStart(&ifOp.thenRegion().front());
// Put a sentinel into the worklist so we know when to pop out of the
// if body again. We use the launchOp here, as that cannot be part of
@ -714,7 +714,7 @@ static LogicalResult processParallelLoop(
}
} else {
// Create a sequential for loop.
auto loopOp = rewriter.create<loop::ForOp>(
auto loopOp = rewriter.create<scf::ForOp>(
loc, cloningMap.lookupOrDefault(lowerBound),
cloningMap.lookupOrDefault(upperBound),
cloningMap.lookupOrDefault(step));


@ -11,7 +11,7 @@
#include "mlir/Conversion/LoopsToGPU/LoopsToGPU.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Transforms/DialectConversion.h"
@ -22,7 +22,7 @@
#define LOOPOP_TO_GPU_PASS_NAME "convert-loop-op-to-gpu"
using namespace mlir;
using namespace mlir::loop;
using namespace mlir::scf;
namespace {
// A pass that traverses top-level loops in the function and converts them to
@ -98,8 +98,8 @@ struct ParallelLoopToGpuPass
target.addLegalDialect<StandardOpsDialect>();
target.addLegalDialect<AffineDialect>();
target.addLegalDialect<gpu::GPUDialect>();
target.addLegalDialect<loop::LoopOpsDialect>();
target.addIllegalOp<loop::ParallelOp>();
target.addLegalDialect<scf::SCFDialect>();
target.addIllegalOp<scf::ParallelOp>();
if (failed(applyPartialConversion(getOperation(), target, patterns)))
signalPassFailure();
}


@ -14,8 +14,8 @@
#include "mlir/Conversion/VectorToLoops/ConvertVectorToLoops.h"
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
#include "mlir/Dialect/LoopOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/VectorOps.h"
@ -168,7 +168,7 @@ void NDTransferOpHelper<ConcreteOp>::emitInBounds(
inBounds = inBounds && inBounds2;
}
auto ifOp = ScopedContext::getBuilderRef().create<loop::IfOp>(
auto ifOp = ScopedContext::getBuilderRef().create<scf::IfOp>(
ScopedContext::getLocation(), TypeRange{}, inBounds,
/*withElseRegion=*/std::is_same<ConcreteOp, TransferReadOp>());
BlockBuilder(&ifOp.thenRegion().front(),


@ -3,9 +3,9 @@ add_subdirectory(AVX512)
add_subdirectory(GPU)
add_subdirectory(Linalg)
add_subdirectory(LLVMIR)
add_subdirectory(LoopOps)
add_subdirectory(OpenMP)
add_subdirectory(Quant)
add_subdirectory(SCF)
add_subdirectory(SDBM)
add_subdirectory(Shape)
add_subdirectory(SPIRV)


@ -18,7 +18,7 @@ add_mlir_dialect_library(MLIRGPU
MLIREDSC
MLIRIR
MLIRLLVMIR
MLIRLoopOps
MLIRSCF
MLIRPass
MLIRSideEffects
MLIRStandardOps


@ -13,7 +13,7 @@
#include "mlir/Dialect/GPU/MemoryPromotion.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
@ -90,7 +90,7 @@ static void insertCopyLoops(OpBuilder &builder, Location loc,
llvm::enumerate(llvm::reverse(llvm::makeArrayRef(ivs).take_back(
GPUDialect::getNumWorkgroupDimensions())))) {
Value v = en.value();
auto loop = cast<loop::ForOp>(v.getParentRegion()->getParentOp());
auto loop = cast<scf::ForOp>(v.getParentRegion()->getParentOp());
mapLoopToProcessorIds(loop, {threadIds[en.index()]},
{blockDims[en.index()]});
}


@ -15,13 +15,13 @@
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Passes.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Pass/Pass.h"
using namespace mlir;
using namespace mlir::gpu;
using namespace mlir::loop;
using namespace mlir::scf;
#include "mlir/Dialect/GPU/ParallelLoopMapperEnums.cpp.inc"
namespace mlir {
@ -41,7 +41,7 @@ ParallelLoopDimMapping getParallelLoopDimMappingAttr(Processor processor,
AffineMapAttr::get(map), AffineMapAttr::get(bound), context);
}
LogicalResult setMappingAttr(loop::ParallelOp ploopOp,
LogicalResult setMappingAttr(scf::ParallelOp ploopOp,
ArrayRef<ParallelLoopDimMapping> mapping) {
// Verify that each processor is mapped to only once.
llvm::DenseSet<gpu::Processor> specifiedMappings;


@ -10,7 +10,7 @@
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/Builders.h"
#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/IR/AffineExpr.h"
@ -19,7 +19,7 @@ using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using namespace mlir::loop;
using namespace mlir::scf;
mlir::edsc::LoopRangeBuilder::LoopRangeBuilder(Value *iv, Value range) {
assert(range.getType() && "expected !linalg.range type");
@ -79,7 +79,7 @@ namespace mlir {
namespace edsc {
template <>
GenericLoopNestRangeBuilder<loop::ForOp>::GenericLoopNestRangeBuilder(
GenericLoopNestRangeBuilder<scf::ForOp>::GenericLoopNestRangeBuilder(
MutableArrayRef<Value> ivs, ArrayRef<Value> ranges) {
builder = std::make_unique<LoopNestRangeBuilder>(ivs, ranges);
}
@ -102,7 +102,7 @@ GenericLoopNestRangeBuilder<AffineForOp>::GenericLoopNestRangeBuilder(
}
template <>
GenericLoopNestRangeBuilder<loop::ParallelOp>::GenericLoopNestRangeBuilder(
GenericLoopNestRangeBuilder<scf::ParallelOp>::GenericLoopNestRangeBuilder(
MutableArrayRef<Value> ivs, ArrayRef<Value> ranges) {
SmallVector<Value, 4> lbs, ubs, steps;
for (Value range : ranges) {


@ -10,6 +10,6 @@ add_mlir_dialect_library(MLIRLinalgEDSC
MLIRAffineOps
MLIRAffineEDSC
MLIRLinalgOps
MLIRLoopOps
MLIRSCF
MLIRStandardOps
)


@ -23,7 +23,7 @@ add_mlir_dialect_library(MLIRLinalgTransforms
MLIRLinalgEDSC
MLIRLinalgOps
MLIRLinalgUtils
MLIRLoopOps
MLIRSCF
MLIRPass
MLIRStandardOps
MLIRStandardToLLVM


@ -14,7 +14,7 @@
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
@ -513,7 +513,7 @@ public:
/// outer parallel loops. All other loops are generated using loop.for
/// operation.
template <typename ConcreteOpTy>
class GenerateLoopNest<loop::ParallelOp, ConcreteOpTy> {
class GenerateLoopNest<scf::ParallelOp, ConcreteOpTy> {
public:
using IndexedValueTy = StdIndexedValue;
@ -534,9 +534,9 @@ public:
// If there are no outer parallel loops, then number of loop ops is same as
// the number of loops, and they are all loop.for ops.
if (nOuterPar) {
GenericLoopNestRangeBuilder<loop::ParallelOp>(
GenericLoopNestRangeBuilder<scf::ParallelOp>(
allIvs.take_front(nOuterPar), loopRanges.take_front(nOuterPar))([&] {
GenericLoopNestRangeBuilder<loop::ForOp>(
GenericLoopNestRangeBuilder<scf::ForOp>(
allIvs.drop_front(nOuterPar),
loopRanges.drop_front(nOuterPar))([&] {
SmallVector<Value, 4> allIvValues(allIvs.begin(), allIvs.end());
@ -547,7 +547,7 @@ public:
} else {
// If there are no parallel loops then fallback to generating all loop.for
// operations.
GenericLoopNestRangeBuilder<loop::ForOp>(allIvs, loopRanges)([&] {
GenericLoopNestRangeBuilder<scf::ForOp>(allIvs, loopRanges)([&] {
SmallVector<Value, 4> allIvValues(allIvs.begin(), allIvs.end());
LinalgScopedEmitter<StdIndexedValue,
ConcreteOpTy>::emitScalarImplementation(allIvValues,
@ -715,13 +715,13 @@ struct LowerToAffineLoops
};
struct LowerToLoops : public LinalgLowerToLoopsBase<LowerToLoops> {
void runOnFunction() override {
lowerLinalgToLoopsImpl<loop::ForOp>(getFunction(), &getContext());
lowerLinalgToLoopsImpl<scf::ForOp>(getFunction(), &getContext());
}
};
struct LowerToParallelLoops
: public LinalgLowerToParallelLoopsBase<LowerToParallelLoops> {
void runOnFunction() override {
lowerLinalgToLoopsImpl<loop::ParallelOp>(getFunction(), &getContext());
lowerLinalgToLoopsImpl<scf::ParallelOp>(getFunction(), &getContext());
}
};
} // namespace
@ -751,7 +751,7 @@ Optional<LinalgLoops> mlir::linalg::linalgLowerOpToLoops(OpBuilder &builder,
template <typename ConcreteOp>
LogicalResult mlir::linalg::linalgOpToLoops(OpBuilder &builder, Operation *op) {
Optional<LinalgLoops> loops =
linalgLowerOpToLoops<loop::ForOp, ConcreteOp>(builder, op);
linalgLowerOpToLoops<scf::ForOp, ConcreteOp>(builder, op);
return loops ? success() : failure();
}
@ -769,7 +769,7 @@ template <typename ConcreteOp>
LogicalResult mlir::linalg::linalgOpToParallelLoops(OpBuilder &builder,
Operation *op) {
Optional<LinalgLoops> loops =
linalgLowerOpToLoops<loop::ParallelOp, ConcreteOp>(builder, op);
linalgLowerOpToLoops<scf::ParallelOp, ConcreteOp>(builder, op);
return loops ? success() : failure();
}
@ -783,7 +783,7 @@ LogicalResult mlir::linalg::linalgOpToParallelLoops(OpBuilder &builder,
template LogicalResult mlir::linalg::linalgOpToParallelLoops<OP_TYPE>( \
OpBuilder & builder, Operation * op); \
template Optional<LinalgLoops> \
mlir::linalg::linalgLowerOpToLoops<loop::ParallelOp, OP_TYPE>( \
mlir::linalg::linalgLowerOpToLoops<scf::ParallelOp, OP_TYPE>( \
OpBuilder & builder, Operation * op);
INSTANTIATE_LINALG_OP_TO_LOOPS(CopyOp)


@ -18,7 +18,7 @@
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
@ -33,7 +33,7 @@ using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using namespace mlir::loop;
using namespace mlir::scf;
using llvm::SetVector;


@ -17,7 +17,7 @@
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineExprVisitor.h"
@ -31,7 +31,7 @@ using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
using namespace mlir::linalg;
using namespace mlir::loop;
using namespace mlir::scf;
using folded_affine_min = FoldedValueBuilder<AffineMinOp>;
@ -468,29 +468,29 @@ Optional<TiledLinalgOp>
mlir::linalg::tileLinalgOp(OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizes,
ArrayRef<unsigned> interchangeVector,
OperationFolder *folder) {
return tileLinalgOpImpl<loop::ForOp>(b, op, tileSizes, interchangeVector,
folder);
return tileLinalgOpImpl<scf::ForOp>(b, op, tileSizes, interchangeVector,
folder);
}
Optional<TiledLinalgOp> mlir::linalg::tileLinalgOpToParallelLoops(
OpBuilder &b, LinalgOp op, ArrayRef<Value> tileSizes,
ArrayRef<unsigned> interchangeVector, OperationFolder *folder) {
return tileLinalgOpImpl<loop::ParallelOp>(b, op, tileSizes, interchangeVector,
folder);
return tileLinalgOpImpl<scf::ParallelOp>(b, op, tileSizes, interchangeVector,
folder);
}
Optional<TiledLinalgOp> mlir::linalg::tileLinalgOp(
OpBuilder &b, LinalgOp op, ArrayRef<int64_t> tileSizes,
ArrayRef<unsigned> interchangeVector, OperationFolder *folder) {
return tileLinalgOpImpl<loop::ForOp>(b, op, tileSizes, interchangeVector,
folder);
return tileLinalgOpImpl<scf::ForOp>(b, op, tileSizes, interchangeVector,
folder);
}
Optional<TiledLinalgOp> mlir::linalg::tileLinalgOpToParallelLoops(
OpBuilder &b, LinalgOp op, ArrayRef<int64_t> tileSizes,
ArrayRef<unsigned> interchangeVector, OperationFolder *folder) {
return tileLinalgOpImpl<loop::ParallelOp>(b, op, tileSizes, interchangeVector,
folder);
return tileLinalgOpImpl<scf::ParallelOp>(b, op, tileSizes, interchangeVector,
folder);
}
template <typename LoopTy>
@ -518,7 +518,7 @@ struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {
LinalgTilingPass(ArrayRef<int64_t> sizes) { tileSizes = sizes; }
void runOnFunction() override {
tileLinalgOps<loop::ForOp>(getFunction(), tileSizes);
tileLinalgOps<scf::ForOp>(getFunction(), tileSizes);
}
};
@ -530,7 +530,7 @@ struct LinalgTilingToParallelLoopsPass
}
void runOnFunction() override {
tileLinalgOps<loop::ParallelOp>(getFunction(), tileSizes);
tileLinalgOps<scf::ParallelOp>(getFunction(), tileSizes);
}
};


@ -9,7 +9,7 @@ add_mlir_dialect_library(MLIRLinalgUtils
MLIREDSC
MLIRIR
MLIRLinalgOps
MLIRLoopOps
MLIRSCF
MLIRPass
MLIRStandardOps
MLIRTransformUtils


@ -14,7 +14,7 @@
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
@ -25,7 +25,7 @@
using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::loop;
using namespace mlir::scf;
Optional<RegionMatcher::BinaryOpKind>
RegionMatcher::matchAsScalarBinaryOp(GenericOp op) {


@ -1,5 +1,5 @@
file(GLOB globbed *.c *.cpp)
add_mlir_dialect_library(MLIRLoopOps
add_mlir_dialect_library(MLIRSCF
${globbed}
EDSC/Builders.cpp
@ -7,7 +7,7 @@ add_mlir_dialect_library(MLIRLoopOps
${MLIR_MAIN_INCLUDE_DIR}/mlir/LoopOps
DEPENDS
MLIRLoopOpsIncGen
MLIRSCFOpsIncGen
LINK_LIBS PUBLIC
MLIREDSC


@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/LoopOps/EDSC/Builders.h"
#include "mlir/Dialect/SCF/EDSC/Builders.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
@ -82,7 +82,7 @@ LoopBuilder mlir::edsc::makeParallelLoopBuilder(MutableArrayRef<Value> ivs,
ArrayRef<Value> lbs,
ArrayRef<Value> ubs,
ArrayRef<Value> steps) {
loop::ParallelOp parallelOp = OperationBuilder<loop::ParallelOp>(
scf::ParallelOp parallelOp = OperationBuilder<scf::ParallelOp>(
SmallVector<Value, 4>(lbs.begin(), lbs.end()),
SmallVector<Value, 4>(ubs.begin(), ubs.end()),
SmallVector<Value, 4>(steps.begin(), steps.end()));
@ -98,10 +98,10 @@ mlir::edsc::makeLoopBuilder(Value *iv, Value lb, Value ub, Value step,
MutableArrayRef<Value> iterArgsHandles,
ValueRange iterArgsInitValues) {
mlir::edsc::LoopBuilder result;
loop::ForOp forOp =
OperationBuilder<loop::ForOp>(lb, ub, step, iterArgsInitValues);
scf::ForOp forOp =
OperationBuilder<scf::ForOp>(lb, ub, step, iterArgsInitValues);
*iv = Value(forOp.getInductionVar());
auto *body = loop::getForInductionVarOwner(*iv).getBody();
auto *body = scf::getForInductionVarOwner(*iv).getBody();
for (size_t i = 0, e = iterArgsHandles.size(); i < e; ++i) {
// Skipping the induction variable.
iterArgsHandles[i] = body->getArgument(i + 1);


@ -1,4 +1,4 @@
//===- Ops.cpp - Loop MLIR Operations -------------------------------------===//
//===- SCF.cpp - Structured Control Flow Operations -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
@ -21,17 +21,17 @@
#include "mlir/Support/MathExtras.h"
using namespace mlir;
using namespace mlir::loop;
using namespace mlir::scf;
//===----------------------------------------------------------------------===//
// LoopOpsDialect
// SCFDialect
//===----------------------------------------------------------------------===//
LoopOpsDialect::LoopOpsDialect(MLIRContext *context)
SCFDialect::SCFDialect(MLIRContext *context)
: Dialect(getDialectNamespace(), context) {
addOperations<
#define GET_OP_LIST
#include "mlir/Dialect/LoopOps/LoopOps.cpp.inc"
#include "mlir/Dialect/SCF/SCFOps.cpp.inc"
>();
}
@ -187,7 +187,7 @@ LogicalResult ForOp::moveOutOfLoop(ArrayRef<Operation *> ops) {
return success();
}
ForOp mlir::loop::getForInductionVarOwner(Value val) {
ForOp mlir::scf::getForInductionVarOwner(Value val) {
auto ivArg = val.dyn_cast<BlockArgument>();
if (!ivArg)
return ForOp();
@ -542,7 +542,7 @@ LogicalResult ParallelOp::moveOutOfLoop(ArrayRef<Operation *> ops) {
return success();
}
ParallelOp mlir::loop::getParallelForInductionVarOwner(Value val) {
ParallelOp mlir::scf::getParallelForInductionVarOwner(Value val) {
auto ivArg = val.dyn_cast<BlockArgument>();
if (!ivArg)
return ParallelOp();
@ -682,4 +682,4 @@ static void print(OpAsmPrinter &p, YieldOp op) {
//===----------------------------------------------------------------------===//
#define GET_OP_CLASSES
#include "mlir/Dialect/LoopOps/LoopOps.cpp.inc"
#include "mlir/Dialect/SCF/SCFOps.cpp.inc"


@ -1,19 +1,19 @@
add_mlir_dialect_library(MLIRLoopOpsTransforms
add_mlir_dialect_library(MLIRSCFTransforms
ParallelLoopFusion.cpp
ParallelLoopSpecialization.cpp
ParallelLoopTiling.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/LoopOps
${MLIR_MAIN_INCLUDE_DIR}/mlir/Dialect/SCF
DEPENDS
MLIRLoopPassIncGen
MLIRSCFPassIncGen
LINK_LIBS PUBLIC
MLIRAffineOps
MLIRIR
MLIRPass
MLIRLoopOps
MLIRSCF
MLIRStandardOps
MLIRSupport
)


@ -11,16 +11,16 @@
//===----------------------------------------------------------------------===//
#include "PassDetail.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/LoopOps/Passes.h"
#include "mlir/Dialect/LoopOps/Transforms.h"
#include "mlir/Dialect/SCF/Passes.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/OpDefinition.h"
using namespace mlir;
using namespace mlir::loop;
using namespace mlir::scf;
/// Verify there are no nested ParallelOps.
static bool hasNestedParallelOp(ParallelOp ploop) {
@ -128,7 +128,7 @@ static void fuseIfLegal(ParallelOp firstPloop, ParallelOp secondPloop,
firstPloop.erase();
}
void mlir::loop::naivelyFuseParallelOps(Region &region) {
void mlir::scf::naivelyFuseParallelOps(Region &region) {
OpBuilder b(region);
// Consider every single block and attempt to fuse adjacent loops.
for (auto &block : region) {


@ -12,14 +12,14 @@
#include "PassDetail.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/LoopOps/Passes.h"
#include "mlir/Dialect/SCF/Passes.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/BlockAndValueMapping.h"
using namespace mlir;
using loop::ParallelOp;
using scf::ParallelOp;
/// Rewrite a loop with bounds defined by an affine.min with a constant into 2
/// loops after checking if the bounds are equal to that constant. This is
@ -52,7 +52,7 @@ static void specializeLoopForUnrolling(ParallelOp op) {
cond = cond ? b.create<AndOp>(op.getLoc(), cond, cmp) : cmp;
map.map(std::get<0>(bound), constant);
}
auto ifOp = b.create<loop::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
auto ifOp = b.create<scf::IfOp>(op.getLoc(), cond, /*withElseRegion=*/true);
ifOp.getThenBodyBuilder().clone(*op.getOperation(), map);
ifOp.getElseBodyBuilder().clone(*op.getOperation());
op.erase();


@ -12,15 +12,15 @@
#include "PassDetail.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/LoopOps/Passes.h"
#include "mlir/Dialect/LoopOps/Transforms.h"
#include "mlir/Dialect/SCF/Passes.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/SCF/Transforms.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/Support/CommandLine.h"
using namespace mlir;
using namespace mlir::loop;
using namespace mlir::scf;
/// Tile a parallel loop of the form
/// loop.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
@ -34,7 +34,7 @@ using namespace mlir::loop;
/// min(tileSize[1], %arg3-%j1))
/// step (%arg4, %arg5)
/// The old loop is replaced with the new one.
void mlir::loop::tileParallelLoop(ParallelOp op, ArrayRef<int64_t> tileSizes) {
void mlir::scf::tileParallelLoop(ParallelOp op, ArrayRef<int64_t> tileSizes) {
OpBuilder b(op);
auto zero = b.create<ConstantIndexOp>(op.getLoc(), 0);
SmallVector<Value, 2> tileSizeConstants;


@ -14,7 +14,7 @@
namespace mlir {
#define GEN_PASS_CLASSES
#include "mlir/Dialect/LoopOps/Passes.h.inc"
#include "mlir/Dialect/SCF/Passes.h.inc"
} // end namespace mlir


@ -16,7 +16,7 @@ add_mlir_dialect_library(MLIRVector
MLIRIR
MLIRStandardOps
MLIRAffineOps
MLIRLoopOps
MLIRSCF
MLIRLoopAnalysis
MLIRSideEffects
)


@ -31,7 +31,7 @@ add_mlir_library(MLIRTransforms
MLIRAffineOps
MLIRAnalysis
MLIRLoopLikeInterface
MLIRLoopOps
MLIRSCF
MLIRPass
MLIRTransformUtils
MLIRVector


@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "PassDetail.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/RegionUtils.h"
@ -23,12 +23,12 @@ struct LoopCoalescingPass : public LoopCoalescingBase<LoopCoalescingPass> {
void runOnFunction() override {
FuncOp func = getFunction();
func.walk([](loop::ForOp op) {
func.walk([](scf::ForOp op) {
// Ignore nested loops.
if (op.getParentOfType<loop::ForOp>())
if (op.getParentOfType<scf::ForOp>())
return;
SmallVector<loop::ForOp, 4> loops;
SmallVector<scf::ForOp, 4> loops;
getPerfectlyNestedLoops(loops, op);
LLVM_DEBUG(llvm::dbgs()
<< "found a perfect nest of depth " << loops.size() << '\n');


@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "PassDetail.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/RegionUtils.h"
@ -24,7 +24,7 @@ struct ParallelLoopCollapsing
void runOnOperation() override {
Operation *module = getOperation();
module->walk([&](loop::ParallelOp op) {
module->walk([&](scf::ParallelOp op) {
// The common case for the GPU dialect will be simplifying the ParallelOp to 3
// arguments, so we do that here to simplify things.
llvm::SmallVector<std::vector<unsigned>, 3> combinedLoops;

View File

@ -17,7 +17,7 @@ add_mlir_library(MLIRTransformUtils
MLIRAffineOps
MLIRAnalysis
MLIRLoopAnalysis
MLIRLoopOps
MLIRSCF
MLIRPass
MLIRStandardOps
)

View File

@ -18,7 +18,7 @@
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/IR/AffineValueMap.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/IR/Function.h"
@ -192,7 +192,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
/// Promotes the loop body of a forOp to its containing block if it can be
/// determined that the loop has a single iteration.
LogicalResult mlir::promoteIfSingleIteration(loop::ForOp forOp) {
LogicalResult mlir::promoteIfSingleIteration(scf::ForOp forOp) {
auto lbCstOp =
dyn_cast_or_null<ConstantIndexOp>(forOp.lowerBound().getDefiningOp());
auto ubCstOp =
@ -445,8 +445,8 @@ void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
getPerfectlyNestedLoopsImpl(nestedLoops, root);
}
void mlir::getPerfectlyNestedLoops(SmallVectorImpl<loop::ForOp> &nestedLoops,
loop::ForOp root) {
void mlir::getPerfectlyNestedLoops(SmallVectorImpl<scf::ForOp> &nestedLoops,
scf::ForOp root) {
getPerfectlyNestedLoopsImpl(nestedLoops, root);
}
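// Illustrative effect of promoteIfSingleIteration (defined above) on an
// scf::ForOp, sketched in IR; the textual prefix is still `loop.` at this
// stage of the renaming and the ops shown are hypothetical:
//
//   %c0 = constant 0 : index
//   %c1 = constant 1 : index
//   loop.for %i = %c0 to %c1 step %c1 {
//     "foo.use"(%i) : (index) -> ()
//   }
//
// becomes, after the single-iteration body is promoted into the parent block
// and the induction variable is replaced by the lower bound:
//
//   %c0 = constant 0 : index
//   %c1 = constant 1 : index
//   "foo.use"(%c0) : (index) -> ()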
@ -474,7 +474,7 @@ LogicalResult mlir::loopUnrollUpToFactor(AffineForOp forOp,
return loopUnrollByFactor(forOp, unrollFactor);
}
// Generates unrolled copies of AffineForOp or loop::ForOp 'loopBodyBlock', with
// Generates unrolled copies of AffineForOp or scf::ForOp 'loopBodyBlock', with
// associated 'forOpIV' by 'unrollFactor', calling 'ivRemapFn' to remap
// 'forOpIV' for each unrolled body.
static void generateUnrolledLoop(
@ -571,7 +571,7 @@ LogicalResult mlir::loopUnrollByFactor(AffineForOp forOp,
}
/// Unrolls 'forOp' by 'unrollFactor', returns success if the loop is unrolled.
LogicalResult mlir::loopUnrollByFactor(loop::ForOp forOp,
LogicalResult mlir::loopUnrollByFactor(scf::ForOp forOp,
uint64_t unrollFactor) {
assert(unrollFactor > 0 && "expected positive unroll factor");
if (unrollFactor == 1)
@ -649,7 +649,7 @@ LogicalResult mlir::loopUnrollByFactor(loop::ForOp forOp,
if (generateEpilogueLoop) {
OpBuilder epilogueBuilder(forOp.getOperation()->getBlock(),
std::next(Block::iterator(forOp)));
auto epilogueForOp = cast<loop::ForOp>(epilogueBuilder.clone(*forOp));
auto epilogueForOp = cast<scf::ForOp>(epilogueBuilder.clone(*forOp));
epilogueForOp.setLowerBound(upperBoundUnrolled);
promoteIfSingleIteration(epilogueForOp);
}
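// Shape of the result of loopUnrollByFactor on an scf::ForOp with a dynamic
// trip count, sketched as a comment (names are illustrative, prefix still
// `loop.`):
//
//   // main loop: trip count rounded down to a multiple of the unroll factor,
//   // body duplicated unrollFactor times per iteration
//   loop.for %iv = %lb to %upperBoundUnrolled step %stepUnrolled { ... }
//   // epilogue loop (the clone created above): remaining iterations with the
//   // original step
//   loop.for %iv = %upperBoundUnrolled to %ub step %step { ... }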
@ -1088,8 +1088,8 @@ stripmineSink(AffineForOp forOp, uint64_t factor,
return innerLoops;
}
static Loops stripmineSink(loop::ForOp forOp, Value factor,
ArrayRef<loop::ForOp> targets) {
static Loops stripmineSink(scf::ForOp forOp, Value factor,
ArrayRef<scf::ForOp> targets) {
auto originalStep = forOp.step();
auto iv = forOp.getInductionVar();
@ -1111,7 +1111,7 @@ static Loops stripmineSink(loop::ForOp forOp, Value factor,
b.create<SelectOp>(t.getLoc(), less, forOp.upperBound(), stepped);
// Splice [begin, begin + nOps - 1) into `newForOp` and replace uses.
auto newForOp = b.create<loop::ForOp>(t.getLoc(), iv, ub, originalStep);
auto newForOp = b.create<scf::ForOp>(t.getLoc(), iv, ub, originalStep);
newForOp.getBody()->getOperations().splice(
newForOp.getBody()->getOperations().begin(),
t.getBody()->getOperations(), begin, std::next(begin, nOps - 1));
@ -1157,9 +1157,9 @@ mlir::tile(ArrayRef<AffineForOp> forOps, ArrayRef<uint64_t> sizes,
return tileImpl(forOps, sizes, targets);
}
SmallVector<Loops, 8> mlir::tile(ArrayRef<loop::ForOp> forOps,
SmallVector<Loops, 8> mlir::tile(ArrayRef<scf::ForOp> forOps,
ArrayRef<Value> sizes,
ArrayRef<loop::ForOp> targets) {
ArrayRef<scf::ForOp> targets) {
return tileImpl(forOps, sizes, targets);
}
@ -1180,15 +1180,15 @@ SmallVector<AffineForOp, 8> mlir::tile(ArrayRef<AffineForOp> forOps,
return tileImpl(forOps, sizes, target);
}
Loops mlir::tile(ArrayRef<loop::ForOp> forOps, ArrayRef<Value> sizes,
loop::ForOp target) {
Loops mlir::tile(ArrayRef<scf::ForOp> forOps, ArrayRef<Value> sizes,
scf::ForOp target) {
return tileImpl(forOps, sizes, target);
}
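// Hypothetical usage of the Value-based tile() overload above; `outer`
// perfectly nests `inner`, and the sizes are arbitrary (assumes the usual
// `using namespace mlir;` of this file):
static void tileWithConstantSizes(scf::ForOp outer, scf::ForOp inner) {
  OpBuilder b(outer);
  Value ts0 = b.create<ConstantIndexOp>(outer.getLoc(), 32);
  Value ts1 = b.create<ConstantIndexOp>(outer.getLoc(), 8);
  Loops intraTileLoops = tile({outer, inner}, {ts0, ts1}, /*target=*/inner);
  (void)intraTileLoops;
}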
Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef<Value> sizes) {
Loops mlir::tilePerfectlyNested(scf::ForOp rootForOp, ArrayRef<Value> sizes) {
// Collect perfectly nested loops. If more size values are provided than
// nested loops are available, truncate `sizes`.
SmallVector<loop::ForOp, 4> forOps;
SmallVector<scf::ForOp, 4> forOps;
forOps.reserve(sizes.size());
getPerfectlyNestedLoopsImpl(forOps, rootForOp, sizes.size());
if (forOps.size() < sizes.size())
@ -1202,7 +1202,7 @@ Loops mlir::tilePerfectlyNested(loop::ForOp rootForOp, ArrayRef<Value> sizes) {
// Ops that come from triangular loops (i.e. that belong to the program slice
// rooted at `outer`) and ops that have side effects cannot be hoisted.
// Return failure when any op fails to hoist.
static LogicalResult hoistOpsBetween(loop::ForOp outer, loop::ForOp inner) {
static LogicalResult hoistOpsBetween(scf::ForOp outer, scf::ForOp inner) {
SetVector<Operation *> forwardSlice;
getForwardSlice(outer.getOperation(), &forwardSlice, [&inner](Operation *op) {
return op != inner.getOperation();
@ -1218,7 +1218,7 @@ static LogicalResult hoistOpsBetween(loop::ForOp outer, loop::ForOp inner) {
status = failure();
continue;
}
// Skip loop::ForOp, these are not considered a failure.
// Skip scf::ForOp, these are not considered a failure.
if (op.getNumRegions() > 0)
continue;
// Skip other ops with regions.
@ -1261,11 +1261,11 @@ static LogicalResult tryIsolateBands(const TileLoops &tileLoops) {
return status;
}
TileLoops mlir::extractFixedOuterLoops(loop::ForOp rootForOp,
TileLoops mlir::extractFixedOuterLoops(scf::ForOp rootForOp,
ArrayRef<int64_t> sizes) {
// Collect perfectly nested loops. If more size values are provided than
// nested loops are available, truncate `sizes`.
SmallVector<loop::ForOp, 4> forOps;
SmallVector<scf::ForOp, 4> forOps;
forOps.reserve(sizes.size());
getPerfectlyNestedLoopsImpl(forOps, rootForOp, sizes.size());
if (forOps.size() < sizes.size())
@ -1363,8 +1363,7 @@ static LoopParams normalizeLoop(OpBuilder &boundsBuilder,
/// expected to be either `loop` or another loop perfectly nested under `loop`.
/// Insert the definition of new bounds immediately before `outer`, which is
/// expected to be either `loop` or its parent in the loop nest.
static void normalizeLoop(loop::ForOp loop, loop::ForOp outer,
loop::ForOp inner) {
static void normalizeLoop(scf::ForOp loop, scf::ForOp outer, scf::ForOp inner) {
OpBuilder builder(outer);
OpBuilder innerBuilder = OpBuilder::atBlockBegin(inner.getBody());
auto loopPieces =
@ -1376,12 +1375,12 @@ static void normalizeLoop(loop::ForOp loop, loop::ForOp outer,
loop.setStep(loopPieces.step);
}
void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
void mlir::coalesceLoops(MutableArrayRef<scf::ForOp> loops) {
if (loops.size() < 2)
return;
loop::ForOp innermost = loops.back();
loop::ForOp outermost = loops.front();
scf::ForOp innermost = loops.back();
scf::ForOp outermost = loops.front();
// 1. Make sure all loops iterate from 0 to upperBound with step 1. This
// allows the following code to assume upperBound is the number of iterations.
@ -1423,7 +1422,7 @@ void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
// 4. Move the operations from the innermost just above the second-outermost
// loop, delete the extra terminator and the second-outermost loop.
loop::ForOp second = loops[1];
scf::ForOp second = loops[1];
innermost.getBody()->back().erase();
outermost.getBody()->getOperations().splice(
Block::iterator(second.getOperation()),
@ -1432,8 +1431,7 @@ void mlir::coalesceLoops(MutableArrayRef<loop::ForOp> loops) {
}
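// Net effect of coalesceLoops on a two-loop nest, sketched in IR (prefix still
// `loop.`; %nA/%nB are the normalized trip counts, names hypothetical):
//
//   loop.for %i = %c0 to %nA step %c1 {
//     loop.for %j = %c0 to %nB step %c1 { "foo.use"(%i, %j) : (index, index) -> () }
//   }
//
// becomes one loop over the product of the trip counts, with the original
// induction variables recovered by an integer division and remainder:
//
//   loop.for %k = %c0 to %nAB step %c1 {   // %nAB = %nA * %nB
//     %i = ... %k / %nB ...                // outer IV
//     %j = ... %k mod %nB ...              // inner IV
//     "foo.use"(%i, %j) : (index, index) -> ()
//   }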
void mlir::collapseParallelLoops(
loop::ParallelOp loops,
ArrayRef<std::vector<unsigned>> combinedDimensions) {
scf::ParallelOp loops, ArrayRef<std::vector<unsigned>> combinedDimensions) {
OpBuilder outsideBuilder(loops);
Location loc = loops.getLoc();
@ -1476,8 +1474,8 @@ void mlir::collapseParallelLoops(
// value. The remainders then determine, based on that range, which iteration
// of the original induction value this represents. This is a normalized value
// that is un-normalized already by the previous logic.
auto newPloop = outsideBuilder.create<loop::ParallelOp>(loc, lowerBounds,
upperBounds, steps);
auto newPloop = outsideBuilder.create<scf::ParallelOp>(loc, lowerBounds,
upperBounds, steps);
OpBuilder insideBuilder(newPloop.region());
for (unsigned i = 0, e = combinedDimensions.size(); i < e; ++i) {
Value previous = newPloop.getBody()->getArgument(i);
@ -1512,7 +1510,7 @@ void mlir::collapseParallelLoops(
loops.erase();
}
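// Hypothetical call (not part of this commit) collapsing a 3-d scf::ParallelOp
// `ploop` so that dimensions 0 and 1 are combined and dimension 2 is kept:
static void collapseFirstTwoDims(scf::ParallelOp ploop) {
  std::vector<unsigned> combined = {0, 1};
  std::vector<unsigned> kept = {2};
  collapseParallelLoops(ploop, {combined, kept});
}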
void mlir::mapLoopToProcessorIds(loop::ForOp forOp, ArrayRef<Value> processorId,
void mlir::mapLoopToProcessorIds(scf::ForOp forOp, ArrayRef<Value> processorId,
ArrayRef<Value> numProcessors) {
assert(processorId.size() == numProcessors.size());
if (processorId.empty())

View File

@ -16,7 +16,7 @@ target_link_libraries(mlir-edsc-builder-api-test
MLIRIR
MLIRLinalgEDSC
MLIRLinalgOps
MLIRLoopOps
MLIRSCF
MLIRStandardOps
MLIRTransforms
MLIRVector

View File

@ -11,7 +11,7 @@
#include "mlir/Dialect/Affine/EDSC/Intrinsics.h"
#include "mlir/Dialect/Linalg/EDSC/Builders.h"
#include "mlir/Dialect/Linalg/EDSC/Intrinsics.h"
#include "mlir/Dialect/LoopOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/SCF/EDSC/Intrinsics.h"
#include "mlir/Dialect/StandardOps/EDSC/Intrinsics.h"
#include "mlir/Dialect/Vector/EDSC/Intrinsics.h"
#include "mlir/EDSC/Builders.h"
@ -39,7 +39,7 @@ static MLIRContext &globalContext() {
static bool init_once = []() {
registerDialect<AffineDialect>();
registerDialect<linalg::LinalgDialect>();
registerDialect<loop::LoopOpsDialect>();
registerDialect<scf::SCFDialect>();
registerDialect<StandardOpsDialect>();
registerDialect<vector::VectorDialect>();
return true;

View File

@ -39,7 +39,7 @@ add_mlir_library(MLIRTestTransforms
MLIRGPUtoCUDATransforms
MLIRLinalgOps
MLIRLinalgTransforms
MLIRLoopOps
MLIRSCF
MLIRGPU
MLIRPass
MLIRStandardToStandard

View File

@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
@ -41,9 +41,9 @@ public:
numProcessors.push_back(op->getResult(1));
});
func.walk([&processorIds, &numProcessors](loop::ForOp op) {
func.walk([&processorIds, &numProcessors](scf::ForOp op) {
// Ignore nested loops.
if (op.getParentRegion()->getParentOfType<loop::ForOp>())
if (op.getParentRegion()->getParentOfType<scf::ForOp>())
return;
mapLoopToProcessorIds(op, processorIds, numProcessors);
});
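// For a single processor dimension, mapLoopToProcessorIds rewrites the mapped
// loop to a cyclic distribution, roughly (IR prefix still `loop.`, names
// hypothetical):
//
//   loop.for %i = %lb to %ub step %step { ... }
//
// becomes
//
//   %newLb   = %lb + %procId * %step
//   %newStep = %step * %numProcs
//   loop.for %i = %newLb to %ub step %newStep { ... }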

View File

@ -10,7 +10,7 @@
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
@ -33,9 +33,9 @@ public:
void runOnFunction() override {
FuncOp func = getFunction();
func.walk([this](loop::ForOp op) {
func.walk([this](scf::ForOp op) {
// Ignore nested loops.
if (op.getParentRegion()->getParentOfType<loop::ForOp>())
if (op.getParentRegion()->getParentOfType<scf::ForOp>())
return;
extractFixedOuterLoops(op, sizes);
});

View File

@ -10,7 +10,7 @@
//
//===----------------------------------------------------------------------===//
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
@ -24,7 +24,7 @@ static unsigned getNestingDepth(Operation *op) {
Operation *currOp = op;
unsigned depth = 0;
while ((currOp = currOp->getParentOp())) {
if (isa<loop::ForOp>(currOp))
if (isa<scf::ForOp>(currOp))
depth++;
}
return depth;
@ -43,8 +43,8 @@ public:
void runOnFunction() override {
FuncOp func = getFunction();
SmallVector<loop::ForOp, 4> loops;
func.walk([&](loop::ForOp forOp) {
SmallVector<scf::ForOp, 4> loops;
func.walk([&](scf::ForOp forOp) {
if (getNestingDepth(forOp) == loopDepth)
loops.push_back(forOp);
});