[PatternMatch] Big mechanical rename OwningRewritePatternList -> RewritePatternSet and insert -> add. NFC

This doesn't change APIs; it just cleans up the many in-tree uses of these
names to use the new preferred names. We'll keep the old names around for a
couple of weeks to ease the transition.

Differential Revision: https://reviews.llvm.org/D99127
This commit is contained in:
Chris Lattner 2021-03-22 16:58:34 -07:00
parent 5c2e50b5d2
commit dc4e913be9
186 changed files with 976 additions and 1016 deletions

View File

@ -156,19 +156,19 @@ is very small, and follows the basic pattern of any dialect conversion pass.
```
void mlir::populateTensorBufferizePatterns(
MLIRContext *context, BufferizeTypeConverter &typeConverter,
OwningRewritePatternList &patterns) {
patterns.insert<BufferizeCastOp, BufferizeExtractOp>(typeConverter, context);
BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns) {
patterns.add<BufferizeCastOp, BufferizeExtractOp>(typeConverter,
patterns.getContext());
}
struct TensorBufferizePass : public TensorBufferizeBase<TensorBufferizePass> {
void runOnFunction() override {
auto *context = &getContext();
BufferizeTypeConverter typeConverter;
OwningRewritePatternList patterns;
RewritePatternSet patterns(context);
ConversionTarget target(*context);
populateTensorBufferizePatterns(context, typeConverter, patterns);
populateTensorBufferizePatterns(typeConverter, patterns);
target.addIllegalOp<tensor::CastOp, tensor::ExtractOp>();
target.addLegalDialect<StandardOpsDialect>();
@ -180,7 +180,7 @@ struct TensorBufferizePass : public TensorBufferizeBase<TensorBufferizePass> {
```
The pass has all the hallmarks of a dialect conversion pass that does type
conversions: a `TypeConverter`, a `OwningRewritePatternList`, and a
conversions: a `TypeConverter`, a `RewritePatternSet`, and a
`ConversionTarget`, and a call to `applyPartialConversion`. Note that a function
`populateTensorBufferizePatterns` is separated, so that power users can use the
patterns independently, if necessary (such as to combine multiple sets of

View File

@ -79,9 +79,9 @@ def MyOp : ... {
Canonicalization patterns can then be provided in the source file:
```c++
void MyOp::getCanonicalizationPatterns(OwningRewritePatternList &patterns,
void MyOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
MLIRContext *context) {
patterns.insert<...>(...);
patterns.add<...>(...);
}
```

View File

@ -154,10 +154,10 @@ creation, as well as many useful attribute and type construction methods.
After a set of patterns have been defined, they are collected and provided to a
specific driver for application. A driver consists of several high levels parts:
* Input `OwningRewritePatternList`
* Input `RewritePatternSet`
The input patterns to a driver are provided in the form of an
`OwningRewritePatternList`. This class provides a simplified API for building a
`RewritePatternSet`. This class provides a simplified API for building a
list of patterns.
* Driver-specific `PatternRewriter`
@ -173,7 +173,7 @@ mutation directly.
Each driver is responsible for defining its own operation visitation order as
well as pattern cost model, but the final application is performed via a
`PatternApplicator` class. This class takes as input the
`OwningRewritePatternList` and transforms the patterns based upon a provided
`RewritePatternSet` and transforms the patterns based upon a provided
cost model. This cost model computes a final benefit for a given pattern, using
whatever driver specific information necessary. After a cost model has been
computed, the driver may begin to match patterns against operations using
@ -189,8 +189,8 @@ public:
};
/// Populate the pattern list.
void collectMyPatterns(OwningRewritePatternList &patterns, MLIRContext *ctx) {
patterns.insert<MyPattern>(/*benefit=*/1, ctx);
void collectMyPatterns(RewritePatternSet &patterns, MLIRContext *ctx) {
patterns.add<MyPattern>(/*benefit=*/1, ctx);
}
/// Define a custom PatternRewriter for use by the driver.
@ -203,7 +203,7 @@ public:
/// Apply the custom driver to `op`.
void applyMyPatternDriver(Operation *op,
const OwningRewritePatternList &patterns) {
const RewritePatternSet &patterns) {
// Initialize the custom PatternRewriter.
MyPatternRewriter rewriter(op->getContext());

View File

@ -155,7 +155,7 @@ add_public_tablegen_target(<name-of-the-cmake-target>)
Then you can `#include` the generated file in any C++ implementation file you
like. (You will also need to make sure the library depends on the CMake target
defined in the above.) The generated file will have a `populateWithGenerated(
OwningRewritePatternList &patterns)` function that you can
RewritePatternSet &patterns)` function that you can
use to collect all the generated patterns inside `patterns` and then use
`patterns` in any pass you would like.

View File

@ -114,8 +114,8 @@ pattern with the canonicalization framework.
```c++
// Register our patterns for rewrite by the Canonicalization framework.
void TransposeOp::getCanonicalizationPatterns(
OwningRewritePatternList &results, MLIRContext *context) {
results.insert<SimplifyRedundantTranspose>(context);
RewritePatternSet &results, MLIRContext *context) {
results.add<SimplifyRedundantTranspose>(context);
}
```

View File

@ -147,8 +147,8 @@ void ToyToAffineLoweringPass::runOnFunction() {
// Now that the conversion target has been defined, we just need to provide
// the set of patterns that will lower the Toy operations.
mlir::OwningRewritePatternList patterns;
patterns.insert<..., TransposeOpLowering>(&getContext());
mlir::RewritePatternSet patterns;
patterns.add<..., TransposeOpLowering>(&getContext());
...
```

View File

@ -90,14 +90,14 @@ into LLVM dialect. These patterns allow for lowering the IR in multiple stages
by relying on [transitive lowering](../../../getting_started/Glossary.md#transitive-lowering).
```c++
mlir::OwningRewritePatternList patterns;
mlir::RewritePatternSet patterns;
mlir::populateAffineToStdConversionPatterns(patterns, &getContext());
mlir::populateLoopToStdConversionPatterns(patterns, &getContext());
mlir::populateStdToLLVMConversionPatterns(typeConverter, patterns);
// The only remaining operation, to lower from the `toy` dialect, is the
// PrintOp.
patterns.insert<PrintOpLowering>(&getContext());
patterns.add<PrintOpLowering>(&getContext());
```
### Full Lowering

View File

@ -54,15 +54,15 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
/// Register our patterns as "canonicalization" patterns on the TransposeOp so
/// that they can be picked up by the Canonicalization framework.
void TransposeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SimplifyRedundantTranspose>(context);
results.add<SimplifyRedundantTranspose>(context);
}
/// Register our patterns as "canonicalization" patterns on the ReshapeOp so
/// that they can be picked up by the Canonicalization framework.
void ReshapeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void ReshapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
results.add<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
FoldConstantReshapeOptPattern>(context);
}

View File

@ -54,15 +54,15 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
/// Register our patterns as "canonicalization" patterns on the TransposeOp so
/// that they can be picked up by the Canonicalization framework.
void TransposeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SimplifyRedundantTranspose>(context);
results.add<SimplifyRedundantTranspose>(context);
}
/// Register our patterns as "canonicalization" patterns on the ReshapeOp so
/// that they can be picked up by the Canonicalization framework.
void ReshapeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void ReshapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
results.add<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
FoldConstantReshapeOptPattern>(context);
}

View File

@ -297,8 +297,8 @@ void ToyToAffineLoweringPass::runOnFunction() {
// Now that the conversion target has been defined, we just need to provide
// the set of patterns that will lower the Toy operations.
OwningRewritePatternList patterns(&getContext());
patterns.insert<AddOpLowering, ConstantOpLowering, MulOpLowering,
RewritePatternSet patterns(&getContext());
patterns.add<AddOpLowering, ConstantOpLowering, MulOpLowering,
ReturnOpLowering, TransposeOpLowering>(&getContext());
// With the target and rewrite patterns defined, we can now attempt the

View File

@ -54,15 +54,15 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
/// Register our patterns as "canonicalization" patterns on the TransposeOp so
/// that they can be picked up by the Canonicalization framework.
void TransposeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SimplifyRedundantTranspose>(context);
results.add<SimplifyRedundantTranspose>(context);
}
/// Register our patterns as "canonicalization" patterns on the ReshapeOp so
/// that they can be picked up by the Canonicalization framework.
void ReshapeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void ReshapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
results.add<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
FoldConstantReshapeOptPattern>(context);
}

View File

@ -296,8 +296,8 @@ void ToyToAffineLoweringPass::runOnFunction() {
// Now that the conversion target has been defined, we just need to provide
// the set of patterns that will lower the Toy operations.
OwningRewritePatternList patterns(&getContext());
patterns.insert<AddOpLowering, ConstantOpLowering, MulOpLowering,
RewritePatternSet patterns(&getContext());
patterns.add<AddOpLowering, ConstantOpLowering, MulOpLowering,
ReturnOpLowering, TransposeOpLowering>(&getContext());
// With the target and rewrite patterns defined, we can now attempt the

View File

@ -191,14 +191,14 @@ void ToyToLLVMLoweringPass::runOnOperation() {
// lowerings. Transitive lowering, or A->B->C lowering, is when multiple
// patterns must be applied to fully transform an illegal operation into a
// set of legal ones.
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
populateAffineToStdConversionPatterns(patterns);
populateLoopToStdConversionPatterns(patterns);
populateStdToLLVMConversionPatterns(typeConverter, patterns);
// The only remaining operation to lower from the `toy` dialect, is the
// PrintOp.
patterns.insert<PrintOpLowering>(&getContext());
patterns.add<PrintOpLowering>(&getContext());
// We want to completely lower to LLVM, so we use a `FullConversion`. This
// ensures that only legal operations will remain after the conversion.

View File

@ -54,15 +54,15 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
/// Register our patterns as "canonicalization" patterns on the TransposeOp so
/// that they can be picked up by the Canonicalization framework.
void TransposeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SimplifyRedundantTranspose>(context);
results.add<SimplifyRedundantTranspose>(context);
}
/// Register our patterns as "canonicalization" patterns on the ReshapeOp so
/// that they can be picked up by the Canonicalization framework.
void ReshapeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void ReshapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
results.add<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
FoldConstantReshapeOptPattern>(context);
}

View File

@ -297,8 +297,8 @@ void ToyToAffineLoweringPass::runOnFunction() {
// Now that the conversion target has been defined, we just need to provide
// the set of patterns that will lower the Toy operations.
OwningRewritePatternList patterns(&getContext());
patterns.insert<AddOpLowering, ConstantOpLowering, MulOpLowering,
RewritePatternSet patterns(&getContext());
patterns.add<AddOpLowering, ConstantOpLowering, MulOpLowering,
ReturnOpLowering, TransposeOpLowering>(&getContext());
// With the target and rewrite patterns defined, we can now attempt the

View File

@ -191,14 +191,14 @@ void ToyToLLVMLoweringPass::runOnOperation() {
// lowerings. Transitive lowering, or A->B->C lowering, is when multiple
// patterns must be applied to fully transform an illegal operation into a
// set of legal ones.
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
populateAffineToStdConversionPatterns(patterns);
populateLoopToStdConversionPatterns(patterns);
populateStdToLLVMConversionPatterns(typeConverter, patterns);
// The only remaining operation to lower from the `toy` dialect, is the
// PrintOp.
patterns.insert<PrintOpLowering>(&getContext());
patterns.add<PrintOpLowering>(&getContext());
// We want to completely lower to LLVM, so we use a `FullConversion`. This
// ensures that only legal operations will remain after the conversion.

View File

@ -72,15 +72,15 @@ struct SimplifyRedundantTranspose : public mlir::OpRewritePattern<TransposeOp> {
/// Register our patterns as "canonicalization" patterns on the TransposeOp so
/// that they can be picked up by the Canonicalization framework.
void TransposeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void TransposeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<SimplifyRedundantTranspose>(context);
results.add<SimplifyRedundantTranspose>(context);
}
/// Register our patterns as "canonicalization" patterns on the ReshapeOp so
/// that they can be picked up by the Canonicalization framework.
void ReshapeOp::getCanonicalizationPatterns(OwningRewritePatternList &results,
void ReshapeOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
results.insert<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
results.add<ReshapeReshapeOptPattern, RedundantReshapeOptPattern,
FoldConstantReshapeOptPattern>(context);
}

View File

@ -42,12 +42,11 @@ Optional<SmallVector<Value, 8>> expandAffineMap(OpBuilder &builder,
/// Collect a set of patterns to convert from the Affine dialect to the Standard
/// dialect, in particular convert structured affine control flow into CFG
/// branch-based control flow.
void populateAffineToStdConversionPatterns(OwningRewritePatternList &patterns);
void populateAffineToStdConversionPatterns(RewritePatternSet &patterns);
/// Collect a set of patterns to convert vector-related Affine ops to the Vector
/// dialect.
void populateAffineToVectorConversionPatterns(
OwningRewritePatternList &patterns);
void populateAffineToVectorConversionPatterns(RewritePatternSet &patterns);
/// Emit code that computes the lower bound of the given affine loop using
/// standard arithmetic operations.

View File

@ -17,7 +17,7 @@ using OwningRewritePatternList = RewritePatternSet;
/// Collect a set of patterns to convert from the ArmSVE dialect to LLVM.
void populateArmSVEToLLVMConversionPatterns(LLVMTypeConverter &converter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
} // namespace mlir

View File

@ -34,7 +34,7 @@ std::unique_ptr<OperationPass<ModuleOp>> createConvertAsyncToLLVMPass();
/// the TypeConverter, but otherwise don't care what type conversions are
/// happening.
void populateAsyncStructuralTypeConversionsAndLegality(
TypeConverter &typeConverter, OwningRewritePatternList &patterns,
TypeConverter &typeConverter, RewritePatternSet &patterns,
ConversionTarget &target);
} // namespace mlir

View File

@ -18,8 +18,8 @@ template <typename T>
class OperationPass;
/// Populate the given list with patterns that convert from Complex to LLVM.
void populateComplexToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns);
void populateComplexToLLVMConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns);
/// Create a pass to convert Complex operations to the LLVMIR dialect.
std::unique_ptr<OperationPass<ModuleOp>> createConvertComplexToLLVMPass();

View File

@ -29,7 +29,7 @@ void configureGpuToNVVMConversionLegality(ConversionTarget &target);
/// Collect a set of patterns to convert from the GPU dialect to NVVM.
void populateGpuToNVVMConversionPatterns(LLVMTypeConverter &converter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Creates a pass that lowers GPU dialect operations to NVVM counterparts. The
/// index bitwidth used for the lowering of the device side index computations

View File

@ -26,7 +26,7 @@ class GPUModuleOp;
/// Collect a set of patterns to convert from the GPU dialect to ROCDL.
void populateGpuToROCDLConversionPatterns(LLVMTypeConverter &converter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Configure target to convert from the GPU dialect to ROCDL.
void configureGpuToROCDLConversionLegality(ConversionTarget &target);

View File

@ -22,7 +22,7 @@ class SPIRVTypeConverter;
/// SPIR-V ops. For a gpu.func to be converted, it should have a
/// spv.entry_point_abi attribute.
void populateGPUToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
} // namespace mlir
#endif // MLIR_CONVERSION_GPUTOSPIRV_GPUTOSPIRV_H

View File

@ -14,11 +14,12 @@
namespace mlir {
class MLIRContext;
class ModuleOp;
template <typename T> class OperationPass;
template <typename T>
class OperationPass;
/// Populate the given list with patterns that convert from Linalg to LLVM.
void populateLinalgToLLVMConversionPatterns(LLVMTypeConverter &converter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Create a pass to convert Linalg operations to the LLVMIR dialect.
std::unique_ptr<OperationPass<ModuleOp>> createConvertLinalgToLLVMPass();

View File

@ -22,7 +22,7 @@ using OwningRewritePatternList = RewritePatternSet;
/// Appends to a pattern list additional patterns for translating Linalg ops to
/// SPIR-V ops.
void populateLinalgToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
} // namespace mlir

View File

@ -69,8 +69,7 @@ public:
};
/// Populate the given list with patterns that convert from Linalg to Standard.
void populateLinalgToStandardConversionPatterns(
OwningRewritePatternList &patterns);
void populateLinalgToStandardConversionPatterns(RewritePatternSet &patterns);
} // namespace linalg

View File

@ -21,7 +21,7 @@ using OwningRewritePatternList = RewritePatternSet;
/// Populate the given list with patterns that convert from OpenMP to LLVM.
void populateOpenMPToLLVMConversionPatterns(LLVMTypeConverter &converter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Create a pass to convert OpenMP operations to the LLVMIR dialect.
std::unique_ptr<OperationPass<ModuleOp>> createConvertOpenMPToLLVMPass();

View File

@ -43,7 +43,7 @@ LogicalResult convertAffineLoopNestToGPULaunch(AffineForOp forOp,
/// Adds the conversion pattern from `scf.parallel` to `gpu.launch` to the
/// provided pattern list.
void populateParallelLoopToGPUPatterns(OwningRewritePatternList &patterns);
void populateParallelLoopToGPUPatterns(RewritePatternSet &patterns);
/// Configures the rewrite target such that only `scf.parallel` operations that
/// are not rewritten by the provided patterns are legal.

View File

@ -37,7 +37,7 @@ private:
/// loop.terminator to CFG operations within the SPIR-V dialect.
void populateSCFToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
ScfToSPIRVContext &scfToSPIRVContext,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
} // namespace mlir
#endif // MLIR_CONVERSION_SCFTOSPIRV_SCFTOSPIRV_H_

View File

@ -22,7 +22,7 @@ using OwningRewritePatternList = RewritePatternSet;
/// Collect a set of patterns to lower from scf.for, scf.if, and
/// loop.terminator to CFG operations within the Standard dialect, in particular
/// convert structured control flow into CFG branch-based control flow.
void populateLoopToStdConversionPatterns(OwningRewritePatternList &patterns);
void populateLoopToStdConversionPatterns(RewritePatternSet &patterns);
/// Creates a pass to convert scf.for, scf.if and loop.terminator ops to CFG.
std::unique_ptr<Pass> createLowerToCFGPass();

View File

@ -41,16 +41,16 @@ void populateSPIRVToLLVMTypeConversion(LLVMTypeConverter &typeConverter);
/// Populates the given list with patterns that convert from SPIR-V to LLVM.
void populateSPIRVToLLVMConversionPatterns(LLVMTypeConverter &typeConverter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Populates the given list with patterns for function conversion from SPIR-V
/// to LLVM.
void populateSPIRVToLLVMFunctionConversionPatterns(
LLVMTypeConverter &typeConverter, OwningRewritePatternList &patterns);
LLVMTypeConverter &typeConverter, RewritePatternSet &patterns);
/// Populates the given patterns for module conversion from SPIR-V to LLVM.
void populateSPIRVToLLVMModuleConversionPatterns(
LLVMTypeConverter &typeConverter, OwningRewritePatternList &patterns);
LLVMTypeConverter &typeConverter, RewritePatternSet &patterns);
} // namespace mlir

View File

@ -20,13 +20,12 @@ class OperationPass;
class RewritePatternSet;
using OwningRewritePatternList = RewritePatternSet;
void populateShapeToStandardConversionPatterns(
OwningRewritePatternList &patterns);
void populateShapeToStandardConversionPatterns(RewritePatternSet &patterns);
std::unique_ptr<OperationPass<ModuleOp>> createConvertShapeToStandardPass();
void populateConvertShapeConstraintsConversionPatterns(
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
std::unique_ptr<OperationPass<FuncOp>> createConvertShapeConstraintsPass();

View File

@ -49,27 +49,27 @@ struct LowerToLLVMOptions {
/// Collect a set of patterns to convert memory-related operations from the
/// Standard dialect to the LLVM dialect, excluding non-memory-related
/// operations and FuncOp.
void populateStdToLLVMMemoryConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns);
void populateStdToLLVMMemoryConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns);
/// Collect a set of patterns to convert from the Standard dialect to the LLVM
/// dialect, excluding the memory-related operations.
void populateStdToLLVMNonMemoryConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns);
void populateStdToLLVMNonMemoryConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns);
/// Collect the default pattern to convert a FuncOp to the LLVM dialect. If
/// `emitCWrappers` is set, the pattern will also produce functions
/// that pass memref descriptors by pointer-to-structure in addition to the
/// default unpacked form.
void populateStdToLLVMFuncOpConversionPattern(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns);
void populateStdToLLVMFuncOpConversionPattern(LLVMTypeConverter &converter,
RewritePatternSet &patterns);
/// Collect the patterns to convert from the Standard dialect to LLVM. The
/// conversion patterns capture the LLVMTypeConverter and the LowerToLLVMOptions
/// by reference meaning the references have to remain alive during the entire
/// pattern lifetime.
void populateStdToLLVMConversionPatterns(LLVMTypeConverter &converter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Creates a pass to convert the Standard dialect into the LLVMIR dialect.
/// stdlib malloc/free is used by default for allocating memrefs allocated with

View File

@ -22,7 +22,7 @@ class SPIRVTypeConverter;
/// to SPIR-V ops. Also adds the patterns to legalize ops not directly
/// translated to SPIR-V dialect.
void populateStandardToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Appends to a pattern list additional patterns for translating tensor ops
/// to SPIR-V ops.
@ -38,12 +38,12 @@ void populateStandardToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
/// threshold is used to control when the patterns should kick in.
void populateTensorToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
int64_t byteCountThreshold,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Appends to a pattern list patterns to legalize ops that are not directly
/// lowered to SPIR-V.
void populateStdLegalizationPatternsForSPIRVLowering(
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
} // namespace mlir

View File

@ -28,7 +28,7 @@ void addTosaToLinalgOnTensorsPasses(OpPassManager &pm);
/// Populates conversion passes from TOSA dialect to Linalg dialect.
void populateTosaToLinalgOnTensorsConversionPatterns(
OwningRewritePatternList *patterns);
RewritePatternSet *patterns);
} // namespace tosa
} // namespace mlir

View File

@ -20,7 +20,7 @@ namespace tosa {
std::unique_ptr<Pass> createTosaToSCF();
void populateTosaToSCFConversionPatterns(OwningRewritePatternList *patterns);
void populateTosaToSCFConversionPatterns(RewritePatternSet *patterns);
/// Populates passes to convert from TOSA to SCF.
void addTosaToSCFPasses(OpPassManager &pm);

View File

@ -20,11 +20,10 @@ namespace tosa {
std::unique_ptr<Pass> createTosaToStandard();
void populateTosaToStandardConversionPatterns(
OwningRewritePatternList *patterns);
void populateTosaToStandardConversionPatterns(RewritePatternSet *patterns);
void populateTosaRescaleToStandardConversionPatterns(
OwningRewritePatternList *patterns);
RewritePatternSet *patterns);
/// Populates passes to convert from TOSA to Standard.
void addTosaToStandardPasses(OpPassManager &pm);

View File

@ -62,12 +62,12 @@ struct LowerVectorToLLVMOptions {
/// Collect a set of patterns to convert from Vector contractions to LLVM Matrix
/// Intrinsics. To lower to assembly, the LLVM flag -lower-matrix-intrinsics
/// will be needed when invoking LLVM.
void populateVectorToLLVMMatrixConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns);
void populateVectorToLLVMMatrixConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns);
/// Collect a set of patterns to convert from the Vector dialect to LLVM.
void populateVectorToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
LLVMTypeConverter &converter, RewritePatternSet &patterns,
bool reassociateFPReductions = false, bool enableIndexOptimizations = true);
/// Create a pass to convert vector operations to the LLVMIR dialect.

View File

@ -19,8 +19,8 @@ class RewritePatternSet;
using OwningRewritePatternList = RewritePatternSet;
/// Collect a set of patterns to convert from the GPU dialect to ROCDL.
void populateVectorToROCDLConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns);
void populateVectorToROCDLConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns);
/// Create a pass to convert vector operations to the ROCDL dialect.
std::unique_ptr<OperationPass<ModuleOp>> createConvertVectorToROCDLPass();

View File

@ -163,7 +163,7 @@ struct VectorTransferRewriter : public RewritePattern {
/// Collect a set of patterns to convert from the Vector dialect to SCF + std.
void populateVectorToSCFConversionPatterns(
OwningRewritePatternList &patterns,
RewritePatternSet &patterns,
const VectorTransferToSCFOptions &options = VectorTransferToSCFOptions());
/// Create a pass to convert a subset of vector ops to SCF.

View File

@ -21,7 +21,7 @@ class SPIRVTypeConverter;
/// Appends to a pattern list additional patterns for translating Vector Ops to
/// SPIR-V ops.
void populateVectorToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
} // namespace mlir

View File

@ -18,8 +18,8 @@ using OwningRewritePatternList = RewritePatternSet;
/// Collect a set of patterns to lower AMX ops to ops that map to LLVM
/// intrinsics.
void populateAMXLegalizeForLLVMExportPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns);
void populateAMXLegalizeForLLVMExportPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns);
/// Configure the target to support lowering AMX ops to ops that map to LLVM
/// intrinsics.

View File

@ -18,8 +18,8 @@ using OwningRewritePatternList = RewritePatternSet;
/// Collect a set of patterns to lower AVX512 ops to ops that map to LLVM
/// intrinsics.
void populateAVX512LegalizeForLLVMExportPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns);
void populateAVX512LegalizeForLLVMExportPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns);
/// Configure the target to support lowering AVX512 ops to ops that map to LLVM
/// intrinsics.

View File

@ -31,10 +31,10 @@ std::unique_ptr<OperationPass<ModuleOp>> createGpuKernelOutliningPass();
std::unique_ptr<OperationPass<FuncOp>> createGpuAsyncRegionPass();
/// Collect a set of patterns to rewrite all-reduce ops within the GPU dialect.
void populateGpuAllReducePatterns(OwningRewritePatternList &patterns);
void populateGpuAllReducePatterns(RewritePatternSet &patterns);
/// Collect all patterns to rewrite ops within the GPU dialect.
inline void populateGpuRewritePatterns(OwningRewritePatternList &patterns) {
inline void populateGpuRewritePatterns(RewritePatternSet &patterns) {
populateGpuAllReducePatterns(patterns);
}

View File

@ -52,8 +52,7 @@ std::unique_ptr<OperationPass<FuncOp>> createLinalgBufferizePass();
/// Populate patterns that convert `ElementwiseMappable` ops to linalg
/// parallel loops.
void populateElementwiseToLinalgConversionPatterns(
OwningRewritePatternList &patterns);
void populateElementwiseToLinalgConversionPatterns(RewritePatternSet &patterns);
/// Create a pass to conver named Linalg operations to Linalg generic
/// operations.
@ -66,15 +65,13 @@ std::unique_ptr<Pass> createLinalgDetensorizePass();
/// Patterns to fold an expanding (collapsing) tensor_reshape operation with its
/// producer (consumer) generic operation by expanding the dimensionality of the
/// loop in the generic op.
void populateFoldReshapeOpsByExpansionPatterns(
OwningRewritePatternList &patterns);
void populateFoldReshapeOpsByExpansionPatterns(RewritePatternSet &patterns);
/// Patterns to fold a collapsing (expanding) tensor_reshape operation with its
/// producer (consumer) generic/indexed_generic operation by linearizing the
/// indexing map used to access the source (target) of the reshape operation in
/// the generic/indexed_generic operation.
void populateFoldReshapeOpsByLinearizationPatterns(
OwningRewritePatternList &patterns);
void populateFoldReshapeOpsByLinearizationPatterns(RewritePatternSet &patterns);
/// Patterns to fold a collapsing (expanding) tensor_reshape operation with its
/// producer (consumer) generic/indexed_generic operation by linearizing the
@ -83,15 +80,14 @@ void populateFoldReshapeOpsByLinearizationPatterns(
/// the tensor reshape involved is collapsing (introducing) unit-extent
/// dimensions.
void populateFoldUnitDimsReshapeOpsByLinearizationPatterns(
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Patterns for fusing linalg operation on tensors.
void populateLinalgTensorOpsFusionPatterns(OwningRewritePatternList &patterns);
void populateLinalgTensorOpsFusionPatterns(RewritePatternSet &patterns);
/// Patterns to fold unit-extent dimensions in operands/results of linalg ops on
/// tensors.
void populateLinalgFoldUnitExtentDimsPatterns(
OwningRewritePatternList &patterns);
void populateLinalgFoldUnitExtentDimsPatterns(RewritePatternSet &patterns);
//===----------------------------------------------------------------------===//
// Registration

View File

@ -24,7 +24,7 @@ struct Transformation {
explicit Transformation(linalg::LinalgTransformationFilter::FilterFunction f)
: filter(f) {}
virtual ~Transformation() = default;
virtual OwningRewritePatternList
virtual RewritePatternSet
buildRewritePatterns(MLIRContext *context,
linalg::LinalgTransformationFilter m) = 0;
linalg::LinalgTransformationFilter::FilterFunction filter = nullptr;
@ -35,11 +35,11 @@ template <template <typename> class PatternType, typename ConcreteOpType,
typename OptionsType,
typename = std::enable_if_t<std::is_member_function_pointer<
decltype(&ConcreteOpType::getOperationName)>::value>>
void sfinae_enqueue(OwningRewritePatternList &patternList, OptionsType options,
void sfinae_enqueue(RewritePatternSet &patternList, OptionsType options,
StringRef opName, linalg::LinalgTransformationFilter m) {
assert(opName == ConcreteOpType::getOperationName() &&
"explicit name must match ConcreteOpType::getOperationName");
patternList.insert<PatternType<ConcreteOpType>>(patternList.getContext(),
patternList.add<PatternType<ConcreteOpType>>(patternList.getContext(),
options, m);
}
@ -47,21 +47,20 @@ void sfinae_enqueue(OwningRewritePatternList &patternList, OptionsType options,
/// (e.g. LinalgOp, other interfaces, Operation*).
template <template <typename> class PatternType, typename OpType,
typename OptionsType>
void sfinae_enqueue(OwningRewritePatternList &patternList, OptionsType options,
void sfinae_enqueue(RewritePatternSet &patternList, OptionsType options,
StringRef opName, linalg::LinalgTransformationFilter m) {
assert(!opName.empty() && "opName must not be empty");
patternList.insert<PatternType<OpType>>(opName, patternList.getContext(),
patternList.add<PatternType<OpType>>(opName, patternList.getContext(),
options, m);
}
template <typename PatternType, typename OpType, typename OptionsType>
void enqueue(OwningRewritePatternList &patternList, OptionsType options,
void enqueue(RewritePatternSet &patternList, OptionsType options,
StringRef opName, linalg::LinalgTransformationFilter m) {
if (!opName.empty())
patternList.insert<PatternType>(opName, patternList.getContext(), options,
m);
patternList.add<PatternType>(opName, patternList.getContext(), options, m);
else
patternList.insert<PatternType>(m.addOpFilter<OpType>(), options);
patternList.add<PatternType>(m.addOpFilter<OpType>(), options);
}
/// Promotion transformation enqueues a particular stage-1 pattern for
@ -77,10 +76,10 @@ struct Tile : public Transformation {
linalg::LinalgTransformationFilter::FilterFunction f = nullptr)
: Transformation(f), opName(name), options(options) {}
OwningRewritePatternList
RewritePatternSet
buildRewritePatterns(MLIRContext *context,
linalg::LinalgTransformationFilter m) override {
OwningRewritePatternList tilingPatterns(context);
RewritePatternSet tilingPatterns(context);
sfinae_enqueue<linalg::LinalgTilingPattern, LinalgOpType>(
tilingPatterns, options, opName, m);
return tilingPatterns;
@ -105,10 +104,10 @@ struct Promote : public Transformation {
linalg::LinalgTransformationFilter::FilterFunction f = nullptr)
: Transformation(f), opName(name), options(options) {}
OwningRewritePatternList
RewritePatternSet
buildRewritePatterns(MLIRContext *context,
linalg::LinalgTransformationFilter m) override {
OwningRewritePatternList promotionPatterns(context);
RewritePatternSet promotionPatterns(context);
sfinae_enqueue<linalg::LinalgPromotionPattern, LinalgOpType>(
promotionPatterns, options, opName, m);
return promotionPatterns;
@ -133,13 +132,13 @@ struct Vectorize : public Transformation {
linalg::LinalgTransformationFilter::FilterFunction f = nullptr)
: Transformation(f), opName(name), options(options) {}
OwningRewritePatternList
RewritePatternSet
buildRewritePatterns(MLIRContext *context,
linalg::LinalgTransformationFilter m) override {
OwningRewritePatternList vectorizationPatterns(context);
RewritePatternSet vectorizationPatterns(context);
enqueue<linalg::LinalgVectorizationPattern, LinalgOpType>(
vectorizationPatterns, options, opName, m);
vectorizationPatterns.insert<linalg::LinalgCopyVTRForwardingPattern,
vectorizationPatterns.add<linalg::LinalgCopyVTRForwardingPattern,
linalg::LinalgCopyVTWForwardingPattern>(
context, /*benefit=*/2);
return vectorizationPatterns;

View File

@ -33,12 +33,12 @@ using LinalgLoops = SmallVector<Operation *, 4>;
/// Populates patterns for vectorization of all ConvN-D ops.
void populateConvVectorizationPatterns(
MLIRContext *context, SmallVectorImpl<OwningRewritePatternList> &patterns,
MLIRContext *context, SmallVectorImpl<RewritePatternSet> &patterns,
ArrayRef<int64_t> tileSizes);
/// Populates the given list with patterns to bufferize linalg ops.
void populateLinalgBufferizePatterns(BufferizeTypeConverter &converter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Performs standalone tiling of a single LinalgOp by `tileSizes`.
/// and permute the loop nest according to `interchangeVector`
@ -441,10 +441,8 @@ struct LinalgTilingOptions {
/// Canonicalization patterns relevant to apply after tiling patterns. These are
/// applied automatically by the tiling pass but need to be applied manually
/// when tiling is called programmatically.
OwningRewritePatternList
getLinalgTilingCanonicalizationPatterns(MLIRContext *ctx);
void populateLinalgTilingCanonicalizationPatterns(
OwningRewritePatternList &patterns);
RewritePatternSet getLinalgTilingCanonicalizationPatterns(MLIRContext *ctx);
void populateLinalgTilingCanonicalizationPatterns(RewritePatternSet &patterns);
/// Base pattern that applied the tiling transformation specified by `options`.
/// Abort and return failure in 2 cases:
@ -690,10 +688,10 @@ template <
typename OpType,
typename = std::enable_if_t<detect_has_get_operation_name<OpType>::value>,
typename = void>
void insertVectorizationPatternImpl(OwningRewritePatternList &patternList,
void insertVectorizationPatternImpl(RewritePatternSet &patternList,
linalg::LinalgVectorizationOptions options,
linalg::LinalgTransformationFilter f) {
patternList.insert<linalg::LinalgVectorizationPattern>(
patternList.add<linalg::LinalgVectorizationPattern>(
OpType::getOperationName(), patternList.getContext(), options, f);
}
@ -701,16 +699,16 @@ void insertVectorizationPatternImpl(OwningRewritePatternList &patternList,
/// an OpInterface).
template <typename OpType, typename = std::enable_if_t<
!detect_has_get_operation_name<OpType>::value>>
void insertVectorizationPatternImpl(OwningRewritePatternList &patternList,
void insertVectorizationPatternImpl(RewritePatternSet &patternList,
linalg::LinalgVectorizationOptions options,
linalg::LinalgTransformationFilter f) {
patternList.insert<linalg::LinalgVectorizationPattern>(
f.addOpFilter<OpType>(), options);
patternList.add<linalg::LinalgVectorizationPattern>(f.addOpFilter<OpType>(),
options);
}
/// Variadic helper function to insert vectorization patterns for C++ ops.
template <typename... OpTypes>
void insertVectorizationPatterns(OwningRewritePatternList &patternList,
void insertVectorizationPatterns(RewritePatternSet &patternList,
linalg::LinalgVectorizationOptions options,
linalg::LinalgTransformationFilter f =
linalg::LinalgTransformationFilter()) {
@ -789,13 +787,13 @@ private:
/// Populates `patterns` with patterns to convert spec-generated named ops to
/// linalg.generic ops.
void populateLinalgNamedOpsGeneralizationPatterns(
OwningRewritePatternList &patterns,
RewritePatternSet &patterns,
LinalgTransformationFilter filter = LinalgTransformationFilter());
/// Populates `patterns` with patterns to convert linalg.conv ops to
/// linalg.generic ops.
void populateLinalgConvGeneralizationPatterns(
OwningRewritePatternList &patterns,
RewritePatternSet &patterns,
LinalgTransformationFilter filter = LinalgTransformationFilter());
//===----------------------------------------------------------------------===//
@ -1056,12 +1054,11 @@ struct SparsificationOptions {
/// Sets up sparsification rewriting rules with the given options.
void populateSparsificationPatterns(
OwningRewritePatternList &patterns,
RewritePatternSet &patterns,
const SparsificationOptions &options = SparsificationOptions());
/// Sets up sparsification conversion rules with the given options.
void populateSparsificationConversionPatterns(
OwningRewritePatternList &patterns);
void populateSparsificationConversionPatterns(RewritePatternSet &patterns);
} // namespace linalg
} // namespace mlir

View File

@ -61,7 +61,7 @@ tileParallelLoop(ParallelOp op, llvm::ArrayRef<int64_t> tileSizes);
/// corresponding scf.yield ops need to update their types accordingly to the
/// TypeConverter, but otherwise don't care what type conversions are happening.
void populateSCFStructuralTypeConversionsAndLegality(
TypeConverter &typeConverter, OwningRewritePatternList &patterns,
TypeConverter &typeConverter, RewritePatternSet &patterns,
ConversionTarget &target);
} // namespace scf

View File

@ -24,7 +24,7 @@
namespace mlir {
namespace spirv {
void populateSPIRVGLSLCanonicalizationPatterns(
mlir::OwningRewritePatternList &results);
mlir::RewritePatternSet &results);
} // namespace spirv
} // namespace mlir

View File

@ -68,7 +68,7 @@ private:
/// interface/ABI; they convert function parameters to be of SPIR-V allowed
/// types.
void populateBuiltinFuncToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
namespace spirv {
class AccessChainOp;

View File

@ -28,7 +28,7 @@ namespace mlir {
std::unique_ptr<Pass> createShapeToShapeLowering();
/// Collects a set of patterns to rewrite ops within the Shape dialect.
void populateShapeRewritePatterns(OwningRewritePatternList &patterns);
void populateShapeRewritePatterns(RewritePatternSet &patterns);
// Collects a set of patterns to replace all constraints with passing witnesses.
// This is intended to then allow all ShapeConstraint related ops and data to
@ -36,7 +36,7 @@ void populateShapeRewritePatterns(OwningRewritePatternList &patterns);
// canonicalization and dead code elimination.
//
// After this pass, no cstr_ operations exist.
void populateRemoveShapeConstraintsPatterns(OwningRewritePatternList &patterns);
void populateRemoveShapeConstraintsPatterns(RewritePatternSet &patterns);
std::unique_ptr<FunctionPass> createRemoveShapeConstraintsPass();
/// Populates patterns for shape dialect structural type conversions and sets up
@ -51,7 +51,7 @@ std::unique_ptr<FunctionPass> createRemoveShapeConstraintsPass();
/// do for a structural type conversion is to update both of their types
/// consistently to the new types prescribed by the TypeConverter.
void populateShapeStructuralTypeConversionsAndLegality(
TypeConverter &typeConverter, OwningRewritePatternList &patterns,
TypeConverter &typeConverter, RewritePatternSet &patterns,
ConversionTarget &target);
// Bufferizes shape dialect ops.

View File

@ -81,9 +81,10 @@ private:
/// Populates the patterns needed to drive the conversion process for
/// decomposing call graph types with the given `ValueDecomposer`.
void populateDecomposeCallGraphTypesPatterns(
MLIRContext *context, TypeConverter &typeConverter,
ValueDecomposer &decomposer, OwningRewritePatternList &patterns);
void populateDecomposeCallGraphTypesPatterns(MLIRContext *context,
TypeConverter &typeConverter,
ValueDecomposer &decomposer,
RewritePatternSet &patterns);
} // end namespace mlir

View File

@ -25,15 +25,15 @@ using OwningRewritePatternList = RewritePatternSet;
/// Add a pattern to the given pattern list to convert the operand and result
/// types of a CallOp with the given type converter.
void populateCallOpTypeConversionPattern(OwningRewritePatternList &patterns,
void populateCallOpTypeConversionPattern(RewritePatternSet &patterns,
TypeConverter &converter);
/// Add a pattern to the given pattern list to rewrite branch operations to use
/// operands that have been legalized by the conversion framework. This can only
/// be done if the branch operation implements the BranchOpInterface. Only
/// needed for partial conversions.
void populateBranchOpInterfaceTypeConversionPattern(
OwningRewritePatternList &patterns, TypeConverter &converter);
void populateBranchOpInterfaceTypeConversionPattern(RewritePatternSet &patterns,
TypeConverter &converter);
/// Return true if op is a BranchOpInterface op whose operands are all legal
/// according to converter.
@ -42,7 +42,7 @@ bool isLegalForBranchOpInterfaceTypeConversionPattern(Operation *op,
/// Add a pattern to the given pattern list to rewrite `return` ops to use
/// operands that have been legalized by the conversion framework.
void populateReturnOpTypeConversionPattern(OwningRewritePatternList &patterns,
void populateReturnOpTypeConversionPattern(RewritePatternSet &patterns,
TypeConverter &converter);
/// For ReturnLike ops (except `return`), return True. If op is a `return` &&

View File

@ -23,7 +23,7 @@ class RewritePatternSet;
using OwningRewritePatternList = RewritePatternSet;
void populateStdBufferizePatterns(BufferizeTypeConverter &typeConverter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Creates an instance of std bufferization pass.
std::unique_ptr<Pass> createStdBufferizePass();
@ -42,7 +42,7 @@ std::unique_ptr<Pass> createTensorConstantBufferizePass();
std::unique_ptr<Pass> createStdExpandOpsPass();
/// Collects a set of patterns to rewrite ops within the Std dialect.
void populateStdExpandOpsPatterns(OwningRewritePatternList &patterns);
void populateStdExpandOpsPatterns(RewritePatternSet &patterns);
//===----------------------------------------------------------------------===//
// Registration

View File

@ -18,7 +18,7 @@ class RewritePatternSet;
using OwningRewritePatternList = RewritePatternSet;
void populateTensorBufferizePatterns(BufferizeTypeConverter &typeConverter,
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Creates an instance of `tensor` dialect bufferization pass.
std::unique_ptr<Pass> createTensorBufferizePass();

View File

@ -40,11 +40,10 @@ struct BitmaskEnumStorage;
/// Collect a set of vector-to-vector canonicalization patterns.
void populateVectorToVectorCanonicalizationPatterns(
OwningRewritePatternList &patterns);
RewritePatternSet &patterns);
/// Collect a set of vector-to-vector transformation patterns.
void populateVectorToVectorTransformationPatterns(
OwningRewritePatternList &patterns);
void populateVectorToVectorTransformationPatterns(RewritePatternSet &patterns);
/// Collect a set of patterns to split transfer read/write ops.
///
@ -55,7 +54,7 @@ void populateVectorToVectorTransformationPatterns(
/// of being generic canonicalization patterns. Also one can let the
/// `ignoreFilter` to return true to fail matching for fine-grained control.
void populateSplitVectorTransferPatterns(
OwningRewritePatternList &patterns,
RewritePatternSet &patterns,
std::function<bool(Operation *)> ignoreFilter = nullptr);
/// Collect a set of leading one dimension removal patterns.
@ -64,15 +63,14 @@ void populateSplitVectorTransferPatterns(
/// to expose more canonical forms of read/write/insert/extract operations.
/// With them, there are more chances that we can cancel out extract-insert
/// pairs or forward write-read pairs.
void populateCastAwayVectorLeadingOneDimPatterns(
OwningRewritePatternList &patterns);
void populateCastAwayVectorLeadingOneDimPatterns(RewritePatternSet &patterns);
/// Collect a set of patterns that bubble up/down bitcast ops.
///
/// These patterns move vector.bitcast ops to be before insert ops or after
/// extract ops where suitable. With them, bitcast will happen on smaller
/// vectors and there are more chances to share extract/insert ops.
void populateBubbleVectorBitCastOpPatterns(OwningRewritePatternList &patterns);
void populateBubbleVectorBitCastOpPatterns(RewritePatternSet &patterns);
/// Collect a set of vector slices transformation patterns:
/// ExtractSlicesOpLowering, InsertSlicesOpLowering
@ -82,13 +80,13 @@ void populateBubbleVectorBitCastOpPatterns(OwningRewritePatternList &patterns);
/// use for "slices" ops), this lowering removes all tuple related
/// operations as well (through DCE and folding). If tuple values
/// "leak" coming in, however, some tuple related ops will remain.
void populateVectorSlicesLoweringPatterns(OwningRewritePatternList &patterns);
void populateVectorSlicesLoweringPatterns(RewritePatternSet &patterns);
/// Collect a set of transfer read/write lowering patterns.
///
/// These patterns lower transfer ops to simpler ops like `vector.load`,
/// `vector.store` and `vector.broadcast`.
void populateVectorTransferLoweringPatterns(OwningRewritePatternList &patterns);
void populateVectorTransferLoweringPatterns(RewritePatternSet &patterns);
/// An attribute that specifies the combining function for `vector.contract`,
/// and `vector.reduction`.
@ -172,7 +170,7 @@ struct VectorTransformsOptions {
/// These transformation express higher level vector ops in terms of more
/// elementary extraction, insertion, reduction, product, and broadcast ops.
void populateVectorContractLoweringPatterns(
OwningRewritePatternList &patterns,
RewritePatternSet &patterns,
VectorTransformsOptions vectorTransformOptions = VectorTransformsOptions());
/// Returns the integer type required for subscripts in the vector dialect.

View File

@ -27,7 +27,7 @@ class IfOp;
/// Collect a set of patterns to convert from the Vector dialect to itself.
/// Should be merged with populateVectorToSCFLoweringPattern.
void populateVectorToVectorConversionPatterns(
MLIRContext *context, OwningRewritePatternList &patterns,
MLIRContext *context, RewritePatternSet &patterns,
ArrayRef<int64_t> coarseVectorShape = {},
ArrayRef<int64_t> fineVectorShape = {});

View File

@ -185,7 +185,7 @@ public:
public:
/// This hook returns any canonicalization pattern rewrites that the operation
/// supports, for use by the canonicalization pass.
static void getCanonicalizationPatterns(OwningRewritePatternList &results,
static void getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {}
protected:

View File

@ -67,7 +67,7 @@ using OwningRewritePatternList = RewritePatternSet;
/// the concrete operation types.
class AbstractOperation {
public:
using GetCanonicalizationPatternsFn = void (*)(OwningRewritePatternList &,
using GetCanonicalizationPatternsFn = void (*)(RewritePatternSet &,
MLIRContext *);
using FoldHookFn = LogicalResult (*)(Operation *, ArrayRef<Attribute>,
SmallVectorImpl<OpFoldResult> &);
@ -126,7 +126,7 @@ public:
/// This hook returns any canonicalization pattern rewrites that the operation
/// supports, for use by the canonicalization pass.
void getCanonicalizationPatterns(OwningRewritePatternList &results,
void getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) const {
return getCanonicalizationPatternsFn(results, context);
}

View File

@ -894,7 +894,7 @@ private:
PDLPatternModule pdlPatterns;
};
// TODO: OwningRewritePatternList is soft-deprecated and will be removed in the
// TODO: RewritePatternSet is soft-deprecated and will be removed in the
// future.
using OwningRewritePatternList = RewritePatternSet;

View File

@ -27,7 +27,7 @@ class FrozenRewritePatternList {
public:
/// Freeze the patterns held in `patterns`, and take ownership.
FrozenRewritePatternList();
FrozenRewritePatternList(OwningRewritePatternList &&patterns);
FrozenRewritePatternList(RewritePatternSet &&patterns);
FrozenRewritePatternList(FrozenRewritePatternList &&patterns) = default;
FrozenRewritePatternList(const FrozenRewritePatternList &patterns) = default;
FrozenRewritePatternList &

View File

@ -56,7 +56,7 @@ void populateBufferizeMaterializationLegality(ConversionTarget &target);
///
/// In particular, these are the tensor_load/buffer_cast ops.
void populateEliminateBufferizeMaterializationsPatterns(
BufferizeTypeConverter &typeConverter, OwningRewritePatternList &patterns);
BufferizeTypeConverter &typeConverter, RewritePatternSet &patterns);
} // end namespace mlir

View File

@ -423,20 +423,20 @@ private:
/// Add a pattern to the given pattern list to convert the signature of a
/// FunctionLike op with the given type converter. This only supports
/// FunctionLike ops which use FunctionType to represent their type.
void populateFunctionLikeTypeConversionPattern(
StringRef functionLikeOpName, OwningRewritePatternList &patterns,
void populateFunctionLikeTypeConversionPattern(StringRef functionLikeOpName,
RewritePatternSet &patterns,
TypeConverter &converter);
template <typename FuncOpT>
void populateFunctionLikeTypeConversionPattern(
OwningRewritePatternList &patterns, TypeConverter &converter) {
void populateFunctionLikeTypeConversionPattern(RewritePatternSet &patterns,
TypeConverter &converter) {
populateFunctionLikeTypeConversionPattern(FuncOpT::getOperationName(),
patterns, converter);
}
/// Add a pattern to the given pattern list to convert the signature of a FuncOp
/// with the given type converter.
void populateFuncOpTypeConversionPattern(OwningRewritePatternList &patterns,
void populateFuncOpTypeConversionPattern(RewritePatternSet &patterns,
TypeConverter &converter);
//===----------------------------------------------------------------------===//

View File

@ -746,10 +746,9 @@ public:
} // end namespace
void mlir::populateAffineToStdConversionPatterns(
OwningRewritePatternList &patterns) {
void mlir::populateAffineToStdConversionPatterns(RewritePatternSet &patterns) {
// clang-format off
patterns.insert<
patterns.add<
AffineApplyLowering,
AffineDmaStartLowering,
AffineDmaWaitLowering,
@ -766,9 +765,9 @@ void mlir::populateAffineToStdConversionPatterns(
}
void mlir::populateAffineToVectorConversionPatterns(
OwningRewritePatternList &patterns) {
RewritePatternSet &patterns) {
// clang-format off
patterns.insert<
patterns.add<
AffineVectorLoadLowering,
AffineVectorStoreLowering>(patterns.getContext());
// clang-format on
@ -777,7 +776,7 @@ void mlir::populateAffineToVectorConversionPatterns(
namespace {
class LowerAffinePass : public ConvertAffineToStandardBase<LowerAffinePass> {
void runOnOperation() override {
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
populateAffineToStdConversionPatterns(patterns);
populateAffineToVectorConversionPatterns(patterns);
ConversionTarget target(getContext());

View File

@ -96,19 +96,19 @@ static Optional<Value> addUnrealizedCast(OpBuilder &builder,
}
/// Populate the given list with patterns that convert from ArmSVE to LLVM.
void mlir::populateArmSVEToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
void mlir::populateArmSVEToLLVMConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns) {
converter.addConversion([&converter](ScalableVectorType svType) {
return convertScalableVectorTypeToLLVM(svType, converter);
});
converter.addSourceMaterialization(addUnrealizedCast);
// clang-format off
patterns.insert<ForwardOperands<CallOp>,
patterns.add<ForwardOperands<CallOp>,
ForwardOperands<CallIndirectOp>,
ForwardOperands<ReturnOp>>(converter,
&converter.getContext());
patterns.insert<SdotOpLowering,
patterns.add<SdotOpLowering,
SmmlaOpLowering,
UdotOpLowering,
UmmlaOpLowering,

View File

@ -875,7 +875,7 @@ void ConvertAsyncToLLVMPass::runOnOperation() {
// Convert async dialect types and operations to LLVM dialect.
AsyncRuntimeTypeConverter converter;
OwningRewritePatternList patterns(ctx);
RewritePatternSet patterns(ctx);
// We use conversion to LLVM type to lower async.runtime load and store
// operations.
@ -887,24 +887,24 @@ void ConvertAsyncToLLVMPass::runOnOperation() {
populateCallOpTypeConversionPattern(patterns, converter);
// Convert return operations inside async.execute regions.
patterns.insert<ReturnOpOpConversion>(converter, ctx);
patterns.add<ReturnOpOpConversion>(converter, ctx);
// Lower async.runtime operations to the async runtime API calls.
patterns.insert<RuntimeSetAvailableOpLowering, RuntimeAwaitOpLowering,
patterns.add<RuntimeSetAvailableOpLowering, RuntimeAwaitOpLowering,
RuntimeAwaitAndResumeOpLowering, RuntimeResumeOpLowering,
RuntimeAddToGroupOpLowering, RuntimeAddRefOpLowering,
RuntimeDropRefOpLowering>(converter, ctx);
// Lower async.runtime operations that rely on LLVM type converter to convert
// from async value payload type to the LLVM type.
patterns.insert<RuntimeCreateOpLowering, RuntimeStoreOpLowering,
patterns.add<RuntimeCreateOpLowering, RuntimeStoreOpLowering,
RuntimeLoadOpLowering>(llvmConverter, ctx);
// Lower async coroutine operations to LLVM coroutine intrinsics.
patterns.insert<CoroIdOpConversion, CoroBeginOpConversion,
CoroFreeOpConversion, CoroEndOpConversion,
CoroSaveOpConversion, CoroSuspendOpConversion>(converter,
ctx);
patterns
.add<CoroIdOpConversion, CoroBeginOpConversion, CoroFreeOpConversion,
CoroEndOpConversion, CoroSaveOpConversion, CoroSuspendOpConversion>(
converter, ctx);
ConversionTarget target(*ctx);
target.addLegalOp<ConstantOp>();
@ -985,15 +985,14 @@ std::unique_ptr<OperationPass<ModuleOp>> mlir::createConvertAsyncToLLVMPass() {
}
void mlir::populateAsyncStructuralTypeConversionsAndLegality(
TypeConverter &typeConverter, OwningRewritePatternList &patterns,
TypeConverter &typeConverter, RewritePatternSet &patterns,
ConversionTarget &target) {
typeConverter.addConversion([&](TokenType type) { return type; });
typeConverter.addConversion([&](ValueType type) {
return ValueType::get(typeConverter.convertType(type.getValueType()));
});
patterns
.insert<ConvertExecuteOpTypes, ConvertAwaitOpTypes, ConvertYieldOpTypes>(
patterns.add<ConvertExecuteOpTypes, ConvertAwaitOpTypes, ConvertYieldOpTypes>(
typeConverter, patterns.getContext());
target.addDynamicallyLegalOp<AwaitOp, ExecuteOp, async::YieldOp>(

View File

@ -258,9 +258,9 @@ struct SubOpConversion : public ConvertOpToLLVMPattern<complex::SubOp> {
} // namespace
void mlir::populateComplexToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
LLVMTypeConverter &converter, RewritePatternSet &patterns) {
// clang-format off
patterns.insert<
patterns.add<
AbsOpConversion,
AddOpConversion,
CreateOpConversion,
@ -284,7 +284,7 @@ void ConvertComplexToLLVMPass::runOnOperation() {
auto module = getOperation();
// Convert to the LLVM IR dialect using the converter defined above.
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
LLVMTypeConverter converter(&getContext());
populateComplexToLLVMConversionPatterns(converter, patterns);

View File

@ -308,7 +308,7 @@ private:
void GpuToLLVMConversionPass::runOnOperation() {
LLVMTypeConverter converter(&getContext());
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
LLVMConversionTarget target(getContext());
populateVectorToLLVMConversionPatterns(converter, patterns);
@ -320,16 +320,16 @@ void GpuToLLVMConversionPass::runOnOperation() {
[context = &converter.getContext()](gpu::AsyncTokenType type) -> Type {
return LLVM::LLVMPointerType::get(IntegerType::get(context, 8));
});
patterns.insert<ConvertAllocOpToGpuRuntimeCallPattern,
patterns.add<ConvertAllocOpToGpuRuntimeCallPattern,
ConvertDeallocOpToGpuRuntimeCallPattern,
ConvertHostRegisterOpToGpuRuntimeCallPattern,
ConvertMemcpyOpToGpuRuntimeCallPattern,
ConvertWaitAsyncOpToGpuRuntimeCallPattern,
ConvertWaitOpToGpuRuntimeCallPattern,
ConvertAsyncYieldToGpuRuntimeCallPattern>(converter);
patterns.insert<ConvertLaunchFuncOpToGpuRuntimeCallPattern>(
converter, gpuBinaryAnnotation);
patterns.insert<EraseGpuModuleOpPattern>(&converter.getContext());
patterns.add<ConvertLaunchFuncOpToGpuRuntimeCallPattern>(converter,
gpuBinaryAnnotation);
patterns.add<EraseGpuModuleOpPattern>(&converter.getContext());
if (failed(
applyPartialConversion(getOperation(), target, std::move(patterns))))

View File

@ -125,8 +125,8 @@ struct LowerGpuOpsToNVVMOpsPass
return converter.convertType(MemRefType::Builder(type).setMemorySpace(0));
});
OwningRewritePatternList patterns(m.getContext());
OwningRewritePatternList llvmPatterns(m.getContext());
RewritePatternSet patterns(m.getContext());
RewritePatternSet llvmPatterns(m.getContext());
// Apply in-dialect lowering first. In-dialect lowering will replace ops
// which need to be lowered further, which is not supported by a single
@ -158,11 +158,11 @@ void mlir::configureGpuToNVVMConversionLegality(ConversionTarget &target) {
target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp, gpu::ModuleEndOp>();
}
void mlir::populateGpuToNVVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
void mlir::populateGpuToNVVMConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns) {
populateWithGenerated(patterns);
patterns
.insert<GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, NVVM::ThreadIdXOp,
.add<GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, NVVM::ThreadIdXOp,
NVVM::ThreadIdYOp, NVVM::ThreadIdZOp>,
GPUIndexIntrinsicOpLowering<gpu::BlockDimOp, NVVM::BlockDimXOp,
NVVM::BlockDimYOp, NVVM::BlockDimZOp>,
@ -175,44 +175,44 @@ void mlir::populateGpuToNVVMConversionPatterns(
// Explicitly drop memory space when lowering private memory
// attributions since NVVM models it as `alloca`s in the default
// memory space and does not support `alloca`s with addrspace(5).
patterns.insert<GPUFuncOpLowering>(
patterns.add<GPUFuncOpLowering>(
converter, /*allocaAddrSpace=*/0,
Identifier::get(NVVM::NVVMDialect::getKernelFuncAttrName(),
&converter.getContext()));
patterns.insert<OpToFuncCallLowering<AbsFOp>>(converter, "__nv_fabsf",
patterns.add<OpToFuncCallLowering<AbsFOp>>(converter, "__nv_fabsf",
"__nv_fabs");
patterns.insert<OpToFuncCallLowering<math::AtanOp>>(converter, "__nv_atanf",
patterns.add<OpToFuncCallLowering<math::AtanOp>>(converter, "__nv_atanf",
"__nv_atan");
patterns.insert<OpToFuncCallLowering<math::Atan2Op>>(converter, "__nv_atan2f",
patterns.add<OpToFuncCallLowering<math::Atan2Op>>(converter, "__nv_atan2f",
"__nv_atan2");
patterns.insert<OpToFuncCallLowering<CeilFOp>>(converter, "__nv_ceilf",
patterns.add<OpToFuncCallLowering<CeilFOp>>(converter, "__nv_ceilf",
"__nv_ceil");
patterns.insert<OpToFuncCallLowering<math::CosOp>>(converter, "__nv_cosf",
patterns.add<OpToFuncCallLowering<math::CosOp>>(converter, "__nv_cosf",
"__nv_cos");
patterns.insert<OpToFuncCallLowering<math::ExpOp>>(converter, "__nv_expf",
patterns.add<OpToFuncCallLowering<math::ExpOp>>(converter, "__nv_expf",
"__nv_exp");
patterns.insert<OpToFuncCallLowering<math::ExpM1Op>>(converter, "__nv_expm1f",
patterns.add<OpToFuncCallLowering<math::ExpM1Op>>(converter, "__nv_expm1f",
"__nv_expm1");
patterns.insert<OpToFuncCallLowering<FloorFOp>>(converter, "__nv_floorf",
patterns.add<OpToFuncCallLowering<FloorFOp>>(converter, "__nv_floorf",
"__nv_floor");
patterns.insert<OpToFuncCallLowering<math::LogOp>>(converter, "__nv_logf",
patterns.add<OpToFuncCallLowering<math::LogOp>>(converter, "__nv_logf",
"__nv_log");
patterns.insert<OpToFuncCallLowering<math::Log1pOp>>(converter, "__nv_log1pf",
patterns.add<OpToFuncCallLowering<math::Log1pOp>>(converter, "__nv_log1pf",
"__nv_log1p");
patterns.insert<OpToFuncCallLowering<math::Log10Op>>(converter, "__nv_log10f",
patterns.add<OpToFuncCallLowering<math::Log10Op>>(converter, "__nv_log10f",
"__nv_log10");
patterns.insert<OpToFuncCallLowering<math::Log2Op>>(converter, "__nv_log2f",
patterns.add<OpToFuncCallLowering<math::Log2Op>>(converter, "__nv_log2f",
"__nv_log2");
patterns.insert<OpToFuncCallLowering<math::PowFOp>>(converter, "__nv_powf",
patterns.add<OpToFuncCallLowering<math::PowFOp>>(converter, "__nv_powf",
"__nv_pow");
patterns.insert<OpToFuncCallLowering<math::RsqrtOp>>(converter, "__nv_rsqrtf",
patterns.add<OpToFuncCallLowering<math::RsqrtOp>>(converter, "__nv_rsqrtf",
"__nv_rsqrt");
patterns.insert<OpToFuncCallLowering<math::SinOp>>(converter, "__nv_sinf",
patterns.add<OpToFuncCallLowering<math::SinOp>>(converter, "__nv_sinf",
"__nv_sin");
patterns.insert<OpToFuncCallLowering<math::SqrtOp>>(converter, "__nv_sqrtf",
patterns.add<OpToFuncCallLowering<math::SqrtOp>>(converter, "__nv_sqrtf",
"__nv_sqrt");
patterns.insert<OpToFuncCallLowering<math::TanhOp>>(converter, "__nv_tanhf",
patterns.add<OpToFuncCallLowering<math::TanhOp>>(converter, "__nv_tanhf",
"__nv_tanh");
}

View File

@ -60,8 +60,8 @@ struct LowerGpuOpsToROCDLOpsPass
/*useAlignedAlloc =*/false};
LLVMTypeConverter converter(m.getContext(), options);
OwningRewritePatternList patterns(m.getContext());
OwningRewritePatternList llvmPatterns(m.getContext());
RewritePatternSet patterns(m.getContext());
RewritePatternSet llvmPatterns(m.getContext());
populateGpuRewritePatterns(patterns);
(void)applyPatternsAndFoldGreedily(m, std::move(patterns));
@ -92,11 +92,11 @@ void mlir::configureGpuToROCDLConversionLegality(ConversionTarget &target) {
target.addLegalOp<gpu::YieldOp, gpu::GPUModuleOp, gpu::ModuleEndOp>();
}
void mlir::populateGpuToROCDLConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
void mlir::populateGpuToROCDLConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns) {
populateWithGenerated(patterns);
patterns.insert<
GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, ROCDL::ThreadIdXOp,
patterns
.add<GPUIndexIntrinsicOpLowering<gpu::ThreadIdOp, ROCDL::ThreadIdXOp,
ROCDL::ThreadIdYOp, ROCDL::ThreadIdZOp>,
GPUIndexIntrinsicOpLowering<gpu::BlockDimOp, ROCDL::BlockDimXOp,
ROCDL::BlockDimYOp, ROCDL::BlockDimZOp>,
@ -105,44 +105,44 @@ void mlir::populateGpuToROCDLConversionPatterns(
GPUIndexIntrinsicOpLowering<gpu::GridDimOp, ROCDL::GridDimXOp,
ROCDL::GridDimYOp, ROCDL::GridDimZOp>,
GPUReturnOpLowering>(converter);
patterns.insert<GPUFuncOpLowering>(
patterns.add<GPUFuncOpLowering>(
converter, /*allocaAddrSpace=*/5,
Identifier::get(ROCDL::ROCDLDialect::getKernelFuncAttrName(),
&converter.getContext()));
patterns.insert<OpToFuncCallLowering<AbsFOp>>(converter, "__ocml_fabs_f32",
patterns.add<OpToFuncCallLowering<AbsFOp>>(converter, "__ocml_fabs_f32",
"__ocml_fabs_f64");
patterns.insert<OpToFuncCallLowering<math::AtanOp>>(
converter, "__ocml_atan_f32", "__ocml_atan_f64");
patterns.insert<OpToFuncCallLowering<math::Atan2Op>>(
patterns.add<OpToFuncCallLowering<math::AtanOp>>(converter, "__ocml_atan_f32",
"__ocml_atan_f64");
patterns.add<OpToFuncCallLowering<math::Atan2Op>>(
converter, "__ocml_atan2_f32", "__ocml_atan2_f64");
patterns.insert<OpToFuncCallLowering<CeilFOp>>(converter, "__ocml_ceil_f32",
patterns.add<OpToFuncCallLowering<CeilFOp>>(converter, "__ocml_ceil_f32",
"__ocml_ceil_f64");
patterns.insert<OpToFuncCallLowering<math::CosOp>>(
converter, "__ocml_cos_f32", "__ocml_cos_f64");
patterns.insert<OpToFuncCallLowering<math::ExpOp>>(
converter, "__ocml_exp_f32", "__ocml_exp_f64");
patterns.insert<OpToFuncCallLowering<math::ExpM1Op>>(
patterns.add<OpToFuncCallLowering<math::CosOp>>(converter, "__ocml_cos_f32",
"__ocml_cos_f64");
patterns.add<OpToFuncCallLowering<math::ExpOp>>(converter, "__ocml_exp_f32",
"__ocml_exp_f64");
patterns.add<OpToFuncCallLowering<math::ExpM1Op>>(
converter, "__ocml_expm1_f32", "__ocml_expm1_f64");
patterns.insert<OpToFuncCallLowering<FloorFOp>>(converter, "__ocml_floor_f32",
patterns.add<OpToFuncCallLowering<FloorFOp>>(converter, "__ocml_floor_f32",
"__ocml_floor_f64");
patterns.insert<OpToFuncCallLowering<math::LogOp>>(
converter, "__ocml_log_f32", "__ocml_log_f64");
patterns.insert<OpToFuncCallLowering<math::Log10Op>>(
patterns.add<OpToFuncCallLowering<math::LogOp>>(converter, "__ocml_log_f32",
"__ocml_log_f64");
patterns.add<OpToFuncCallLowering<math::Log10Op>>(
converter, "__ocml_log10_f32", "__ocml_log10_f64");
patterns.insert<OpToFuncCallLowering<math::Log1pOp>>(
patterns.add<OpToFuncCallLowering<math::Log1pOp>>(
converter, "__ocml_log1p_f32", "__ocml_log1p_f64");
patterns.insert<OpToFuncCallLowering<math::Log2Op>>(
converter, "__ocml_log2_f32", "__ocml_log2_f64");
patterns.insert<OpToFuncCallLowering<math::PowFOp>>(
converter, "__ocml_pow_f32", "__ocml_pow_f64");
patterns.insert<OpToFuncCallLowering<math::RsqrtOp>>(
patterns.add<OpToFuncCallLowering<math::Log2Op>>(converter, "__ocml_log2_f32",
"__ocml_log2_f64");
patterns.add<OpToFuncCallLowering<math::PowFOp>>(converter, "__ocml_pow_f32",
"__ocml_pow_f64");
patterns.add<OpToFuncCallLowering<math::RsqrtOp>>(
converter, "__ocml_rsqrt_f32", "__ocml_rsqrt_f64");
patterns.insert<OpToFuncCallLowering<math::SinOp>>(
converter, "__ocml_sin_f32", "__ocml_sin_f64");
patterns.insert<OpToFuncCallLowering<math::SqrtOp>>(
converter, "__ocml_sqrt_f32", "__ocml_sqrt_f64");
patterns.insert<OpToFuncCallLowering<math::TanhOp>>(
converter, "__ocml_tanh_f32", "__ocml_tanh_f64");
patterns.add<OpToFuncCallLowering<math::SinOp>>(converter, "__ocml_sin_f32",
"__ocml_sin_f64");
patterns.add<OpToFuncCallLowering<math::SqrtOp>>(converter, "__ocml_sqrt_f32",
"__ocml_sqrt_f64");
patterns.add<OpToFuncCallLowering<math::TanhOp>>(converter, "__ocml_tanh_f32",
"__ocml_tanh_f64");
}
std::unique_ptr<OperationPass<gpu::GPUModuleOp>>

View File

@ -330,9 +330,9 @@ namespace {
}
void mlir::populateGPUToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns) {
RewritePatternSet &patterns) {
populateWithGenerated(patterns);
patterns.insert<
patterns.add<
GPUFuncOpConversion, GPUModuleConversion, GPUReturnOpConversion,
LaunchConfigConversion<gpu::BlockIdOp, spirv::BuiltIn::WorkgroupId>,
LaunchConfigConversion<gpu::GridDimOp, spirv::BuiltIn::NumWorkgroups>,

View File

@ -57,7 +57,7 @@ void GPUToSPIRVPass::runOnOperation() {
spirv::SPIRVConversionTarget::get(targetAttr);
SPIRVTypeConverter typeConverter(targetAttr);
OwningRewritePatternList patterns(context);
RewritePatternSet patterns(context);
populateGPUToSPIRVPatterns(typeConverter, patterns);
populateStandardToSPIRVPatterns(typeConverter, patterns);

View File

@ -200,9 +200,9 @@ public:
} // namespace
/// Populate the given list with patterns that convert from Linalg to LLVM.
void mlir::populateLinalgToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
patterns.insert<RangeOpConversion, ReshapeOpConversion, YieldOpConversion>(
void mlir::populateLinalgToLLVMConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns) {
patterns.add<RangeOpConversion, ReshapeOpConversion, YieldOpConversion>(
converter);
// Populate the type conversions for the linalg types.
@ -221,7 +221,7 @@ void ConvertLinalgToLLVMPass::runOnOperation() {
auto module = getOperation();
// Convert to the LLVM IR dialect using the converter defined above.
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
LLVMTypeConverter converter(&getContext());
populateLinalgToLLVMConversionPatterns(converter, patterns);

View File

@ -204,7 +204,6 @@ LogicalResult SingleWorkgroupReduction::matchAndRewrite(
//===----------------------------------------------------------------------===//
void mlir::populateLinalgToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns) {
patterns.insert<SingleWorkgroupReduction>(typeConverter,
patterns.getContext());
RewritePatternSet &patterns) {
patterns.add<SingleWorkgroupReduction>(typeConverter, patterns.getContext());
}

View File

@ -30,7 +30,7 @@ void LinalgToSPIRVPass::runOnOperation() {
spirv::SPIRVConversionTarget::get(targetAttr);
SPIRVTypeConverter typeConverter(targetAttr);
OwningRewritePatternList patterns(context);
RewritePatternSet patterns(context);
populateLinalgToSPIRVPatterns(typeConverter, patterns);
populateBuiltinFuncToSPIRVPatterns(typeConverter, patterns);

View File

@ -192,15 +192,15 @@ mlir::linalg::IndexedGenericOpToLibraryCallRewrite::matchAndRewrite(
/// Populate the given list with patterns that convert from Linalg to Standard.
void mlir::linalg::populateLinalgToStandardConversionPatterns(
OwningRewritePatternList &patterns) {
RewritePatternSet &patterns) {
// TODO: ConvOp conversion needs to export a descriptor with relevant
// attribute values such as kernel striding and dilation.
// clang-format off
patterns.insert<
patterns.add<
CopyOpToLibraryCallRewrite,
CopyTransposeRewrite,
IndexedGenericOpToLibraryCallRewrite>(patterns.getContext());
patterns.insert<LinalgOpToLibraryCallRewrite>();
patterns.add<LinalgOpToLibraryCallRewrite>();
// clang-format on
}
@ -218,7 +218,7 @@ void ConvertLinalgToStandardPass::runOnOperation() {
StandardOpsDialect>();
target.addLegalOp<ModuleOp, FuncOp, ModuleTerminatorOp, ReturnOp>();
target.addLegalOp<linalg::ReshapeOp, linalg::RangeOp>();
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
populateLinalgToStandardConversionPatterns(patterns);
if (failed(applyFullConversion(module, target, std::move(patterns))))
signalPassFailure();

View File

@ -41,9 +41,9 @@ struct RegionOpConversion : public ConvertOpToLLVMPattern<OpType> {
};
} // namespace
void mlir::populateOpenMPToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
patterns.insert<RegionOpConversion<omp::ParallelOp>,
void mlir::populateOpenMPToLLVMConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns) {
patterns.add<RegionOpConversion<omp::ParallelOp>,
RegionOpConversion<omp::WsLoopOp>>(converter);
}
@ -58,7 +58,7 @@ void ConvertOpenMPToLLVMPass::runOnOperation() {
auto module = getOperation();
// Convert to OpenMP operations with LLVM IR dialect
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
LLVMTypeConverter converter(&getContext());
populateStdToLLVMConversionPatterns(converter, patterns);
populateOpenMPToLLVMConversionPatterns(converter, patterns);

View File

@ -642,9 +642,8 @@ ParallelToGpuLaunchLowering::matchAndRewrite(ParallelOp parallelOp,
return success();
}
void mlir::populateParallelLoopToGPUPatterns(
OwningRewritePatternList &patterns) {
patterns.insert<ParallelToGpuLaunchLowering>(patterns.getContext());
void mlir::populateParallelLoopToGPUPatterns(RewritePatternSet &patterns) {
patterns.add<ParallelToGpuLaunchLowering>(patterns.getContext());
}
void mlir::configureParallelLoopToGPULegality(ConversionTarget &target) {

View File

@ -47,7 +47,7 @@ struct ForLoopMapper : public ConvertAffineForToGPUBase<ForLoopMapper> {
struct ParallelLoopToGpuPass
: public ConvertParallelLoopToGpuBase<ParallelLoopToGpuPass> {
void runOnOperation() override {
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
populateParallelLoopToGPUPatterns(patterns);
ConversionTarget target(getContext());
target.markUnknownOpDynamicallyLegal([](Operation *) { return true; });

View File

@ -90,8 +90,8 @@ static LogicalResult applyPatterns(FuncOp func) {
[](scf::YieldOp op) { return !isa<scf::ParallelOp>(op->getParentOp()); });
target.addLegalDialect<omp::OpenMPDialect>();
OwningRewritePatternList patterns(func.getContext());
patterns.insert<ParallelOpLowering>(func.getContext());
RewritePatternSet patterns(func.getContext());
patterns.add<ParallelOpLowering>(func.getContext());
FrozenRewritePatternList frozen(std::move(patterns));
return applyPartialConversion(func, target, frozen);
}

View File

@ -321,7 +321,7 @@ LogicalResult TerminatorOpConversion::matchAndRewrite(
void mlir::populateSCFToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
ScfToSPIRVContext &scfToSPIRVContext,
OwningRewritePatternList &patterns) {
patterns.insert<ForOpConversion, IfOpConversion, TerminatorOpConversion>(
RewritePatternSet &patterns) {
patterns.add<ForOpConversion, IfOpConversion, TerminatorOpConversion>(
patterns.getContext(), typeConverter, scfToSPIRVContext.getImpl());
}

View File

@ -37,7 +37,7 @@ void SCFToSPIRVPass::runOnOperation() {
SPIRVTypeConverter typeConverter(targetAttr);
ScfToSPIRVContext scfContext;
OwningRewritePatternList patterns(context);
RewritePatternSet patterns(context);
populateSCFToSPIRVPatterns(typeConverter, scfContext, patterns);
populateStandardToSPIRVPatterns(typeConverter, patterns);
populateBuiltinFuncToSPIRVPatterns(typeConverter, patterns);

View File

@ -568,15 +568,14 @@ DoWhileLowering::matchAndRewrite(WhileOp whileOp,
return success();
}
void mlir::populateLoopToStdConversionPatterns(
OwningRewritePatternList &patterns) {
patterns.insert<ForLowering, IfLowering, ParallelLowering, WhileLowering>(
void mlir::populateLoopToStdConversionPatterns(RewritePatternSet &patterns) {
patterns.add<ForLowering, IfLowering, ParallelLowering, WhileLowering>(
patterns.getContext());
patterns.insert<DoWhileLowering>(patterns.getContext(), /*benefit=*/2);
patterns.add<DoWhileLowering>(patterns.getContext(), /*benefit=*/2);
}
void SCFToStandardPass::runOnOperation() {
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
populateLoopToStdConversionPatterns(patterns);
// Configure conversion to lower out scf.for, scf.if, scf.parallel and
// scf.while. Anything else is fine.

View File

@ -278,10 +278,10 @@ public:
/*emitCWrappers=*/true,
/*indexBitwidth=*/kDeriveIndexBitwidthFromDataLayout};
auto *context = module.getContext();
OwningRewritePatternList patterns(context);
RewritePatternSet patterns(context);
LLVMTypeConverter typeConverter(context, options);
populateStdToLLVMConversionPatterns(typeConverter, patterns);
patterns.insert<GPULaunchLowering>(typeConverter);
patterns.add<GPULaunchLowering>(typeConverter);
// Pull in SPIR-V type conversion patterns to convert SPIR-V global
// variable's type to LLVM dialect type.

View File

@ -1385,8 +1385,8 @@ void mlir::populateSPIRVToLLVMTypeConversion(LLVMTypeConverter &typeConverter) {
}
void mlir::populateSPIRVToLLVMConversionPatterns(
LLVMTypeConverter &typeConverter, OwningRewritePatternList &patterns) {
patterns.insert<
LLVMTypeConverter &typeConverter, RewritePatternSet &patterns) {
patterns.add<
// Arithmetic ops
DirectConversionPattern<spirv::IAddOp, LLVM::AddOp>,
DirectConversionPattern<spirv::IMulOp, LLVM::MulOp>,
@ -1499,13 +1499,13 @@ void mlir::populateSPIRVToLLVMConversionPatterns(
}
void mlir::populateSPIRVToLLVMFunctionConversionPatterns(
LLVMTypeConverter &typeConverter, OwningRewritePatternList &patterns) {
patterns.insert<FuncConversionPattern>(patterns.getContext(), typeConverter);
LLVMTypeConverter &typeConverter, RewritePatternSet &patterns) {
patterns.add<FuncConversionPattern>(patterns.getContext(), typeConverter);
}
void mlir::populateSPIRVToLLVMModuleConversionPatterns(
LLVMTypeConverter &typeConverter, OwningRewritePatternList &patterns) {
patterns.insert<ModuleConversionPattern, ModuleEndConversionPattern>(
LLVMTypeConverter &typeConverter, RewritePatternSet &patterns) {
patterns.add<ModuleConversionPattern, ModuleEndConversionPattern>(
patterns.getContext(), typeConverter);
}

View File

@ -36,7 +36,7 @@ void ConvertSPIRVToLLVMPass::runOnOperation() {
// Encode global variable's descriptor set and binding if they exist.
encodeBindAttribute(module);
OwningRewritePatternList patterns(context);
RewritePatternSet patterns(context);
populateSPIRVToLLVMTypeConversion(converter);

View File

@ -37,10 +37,10 @@ public:
} // namespace
void mlir::populateConvertShapeConstraintsConversionPatterns(
OwningRewritePatternList &patterns) {
patterns.insert<CstrBroadcastableToRequire>(patterns.getContext());
patterns.insert<CstrEqToRequire>(patterns.getContext());
patterns.insert<ConvertCstrRequireOp>(patterns.getContext());
RewritePatternSet &patterns) {
patterns.add<CstrBroadcastableToRequire>(patterns.getContext());
patterns.add<CstrEqToRequire>(patterns.getContext());
patterns.add<ConvertCstrRequireOp>(patterns.getContext());
}
namespace {
@ -54,7 +54,7 @@ class ConvertShapeConstraints
auto func = getOperation();
auto *context = &getContext();
OwningRewritePatternList patterns(context);
RewritePatternSet patterns(context);
populateConvertShapeConstraintsConversionPatterns(patterns);
if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns))))

View File

@ -678,7 +678,7 @@ void ConvertShapeToStandardPass::runOnOperation() {
target.addLegalOp<CstrRequireOp, FuncOp, ModuleOp, ModuleTerminatorOp>();
// Setup conversion patterns.
OwningRewritePatternList patterns(&ctx);
RewritePatternSet patterns(&ctx);
populateShapeToStandardConversionPatterns(patterns);
// Apply conversion.
@ -688,10 +688,10 @@ void ConvertShapeToStandardPass::runOnOperation() {
}
void mlir::populateShapeToStandardConversionPatterns(
OwningRewritePatternList &patterns) {
RewritePatternSet &patterns) {
// clang-format off
populateWithGenerated(patterns);
patterns.insert<
patterns.add<
AnyOpConversion,
BinaryOpConversion<AddOp, AddIOp>,
BinaryOpConversion<MulOp, MulIOp>,

View File

@ -3856,10 +3856,10 @@ private:
/// Collect a set of patterns to convert from the Standard dialect to LLVM.
void mlir::populateStdToLLVMNonMemoryConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
LLVMTypeConverter &converter, RewritePatternSet &patterns) {
// FIXME: this should be tablegen'ed
// clang-format off
patterns.insert<
patterns.add<
AbsFOpLowering,
AddFOpLowering,
AddIOpLowering,
@ -3926,9 +3926,9 @@ void mlir::populateStdToLLVMNonMemoryConversionPatterns(
}
void mlir::populateStdToLLVMMemoryConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
LLVMTypeConverter &converter, RewritePatternSet &patterns) {
// clang-format off
patterns.insert<
patterns.add<
AssumeAlignmentOpLowering,
DeallocOpLowering,
DimOpLowering,
@ -3945,21 +3945,21 @@ void mlir::populateStdToLLVMMemoryConversionPatterns(
ViewOpLowering>(converter);
// clang-format on
if (converter.getOptions().useAlignedAlloc)
patterns.insert<AlignedAllocOpLowering>(converter);
patterns.add<AlignedAllocOpLowering>(converter);
else
patterns.insert<AllocOpLowering>(converter);
patterns.add<AllocOpLowering>(converter);
}
void mlir::populateStdToLLVMFuncOpConversionPattern(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
LLVMTypeConverter &converter, RewritePatternSet &patterns) {
if (converter.getOptions().useBarePtrCallConv)
patterns.insert<BarePtrFuncOpConversion>(converter);
patterns.add<BarePtrFuncOpConversion>(converter);
else
patterns.insert<FuncOpConversion>(converter);
patterns.add<FuncOpConversion>(converter);
}
void mlir::populateStdToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
void mlir::populateStdToLLVMConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns) {
populateStdToLLVMFuncOpConversionPattern(converter, patterns);
populateStdToLLVMNonMemoryConversionPatterns(converter, patterns);
populateStdToLLVMMemoryConversionPatterns(converter, patterns);
@ -4079,7 +4079,7 @@ struct LLVMLoweringPass : public ConvertStandardToLLVMBase<LLVMLoweringPass> {
llvm::DataLayout(this->dataLayout)};
LLVMTypeConverter typeConverter(&getContext(), options);
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
populateStdToLLVMConversionPatterns(typeConverter, patterns);
LLVMConversionTarget target(getContext());

View File

@ -193,8 +193,8 @@ StoreOpOfSubViewFolder<OpTy>::matchAndRewrite(OpTy storeOp,
//===----------------------------------------------------------------------===//
void mlir::populateStdLegalizationPatternsForSPIRVLowering(
OwningRewritePatternList &patterns) {
patterns.insert<LoadOpOfSubViewFolder<memref::LoadOp>,
RewritePatternSet &patterns) {
patterns.add<LoadOpOfSubViewFolder<memref::LoadOp>,
LoadOpOfSubViewFolder<vector::TransferReadOp>,
StoreOpOfSubViewFolder<memref::StoreOp>,
StoreOpOfSubViewFolder<vector::TransferWriteOp>>(
@ -213,7 +213,7 @@ struct SPIRVLegalization final
} // namespace
void SPIRVLegalization::runOnOperation() {
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
populateStdLegalizationPatternsForSPIRVLowering(patterns);
(void)applyPatternsAndFoldGreedily(getOperation()->getRegions(),
std::move(patterns));

View File

@ -1225,10 +1225,10 @@ XOrOpPattern::matchAndRewrite(XOrOp xorOp, ArrayRef<Value> operands,
namespace mlir {
void populateStandardToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
OwningRewritePatternList &patterns) {
RewritePatternSet &patterns) {
MLIRContext *context = patterns.getContext();
patterns.insert<
patterns.add<
// Math dialect operations.
// TODO: Move to separate pass.
UnaryAndBinaryOpPattern<math::CosOp, spirv::GLSLCosOp>,
@ -1290,14 +1290,14 @@ void populateStandardToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
// Give CmpFOpNanKernelPattern a higher benefit so it can prevail when Kernel
// capability is available.
patterns.insert<CmpFOpNanKernelPattern>(typeConverter, context,
patterns.add<CmpFOpNanKernelPattern>(typeConverter, context,
/*benefit=*/2);
}
void populateTensorToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
int64_t byteCountThreshold,
OwningRewritePatternList &patterns) {
patterns.insert<TensorExtractPattern>(typeConverter, patterns.getContext(),
RewritePatternSet &patterns) {
patterns.add<TensorExtractPattern>(typeConverter, patterns.getContext(),
byteCountThreshold);
}

View File

@ -35,7 +35,7 @@ void ConvertStandardToSPIRVPass::runOnOperation() {
spirv::SPIRVConversionTarget::get(targetAttr);
SPIRVTypeConverter typeConverter(targetAttr);
OwningRewritePatternList patterns(context);
RewritePatternSet patterns(context);
populateStandardToSPIRVPatterns(typeConverter, patterns);
populateTensorToSPIRVPatterns(typeConverter,
/*byteCountThreshold=*/64, patterns);

View File

@ -1016,8 +1016,8 @@ public:
} // namespace
void mlir::tosa::populateTosaToLinalgOnTensorsConversionPatterns(
OwningRewritePatternList *patterns) {
patterns->insert<
RewritePatternSet *patterns) {
patterns->add<
PointwiseConverter<tosa::AddOp>, PointwiseConverter<tosa::SubOp>,
PointwiseConverter<tosa::MulOp>, PointwiseConverter<tosa::NegateOp>,
PointwiseConverter<tosa::PowOp>, PointwiseConverter<tosa::RsqrtOp>,

View File

@ -37,7 +37,7 @@ public:
}
void runOnFunction() override {
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
ConversionTarget target(getContext());
target.addLegalDialect<linalg::LinalgDialect, memref::MemRefDialect,
StandardOpsDialect>();

View File

@ -103,7 +103,7 @@ public:
} // namespace
void mlir::tosa::populateTosaToSCFConversionPatterns(
OwningRewritePatternList *patterns) {
patterns->insert<IfOpConverter>(patterns->getContext());
patterns->insert<WhileOpConverter>(patterns->getContext());
RewritePatternSet *patterns) {
patterns->add<IfOpConverter>(patterns->getContext());
patterns->add<WhileOpConverter>(patterns->getContext());
}

View File

@ -29,7 +29,7 @@ namespace {
struct TosaToSCF : public TosaToSCFBase<TosaToSCF> {
public:
void runOnOperation() override {
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
ConversionTarget target(getContext());
target.addLegalDialect<tensor::TensorDialect, scf::SCFDialect>();
target.addIllegalOp<tosa::IfOp, tosa::WhileOp>();

View File

@ -154,12 +154,12 @@ public:
} // namespace
void mlir::tosa::populateTosaToStandardConversionPatterns(
OwningRewritePatternList *patterns) {
patterns->insert<ApplyScaleOpConverter, ConstOpConverter, SliceOpConverter>(
RewritePatternSet *patterns) {
patterns->add<ApplyScaleOpConverter, ConstOpConverter, SliceOpConverter>(
patterns->getContext());
}
void mlir::tosa::populateTosaRescaleToStandardConversionPatterns(
OwningRewritePatternList *patterns) {
patterns->insert<ApplyScaleOpConverter>(patterns->getContext());
RewritePatternSet *patterns) {
patterns->add<ApplyScaleOpConverter>(patterns->getContext());
}

View File

@ -29,7 +29,7 @@ namespace {
struct TosaToStandard : public TosaToStandardBase<TosaToStandard> {
public:
void runOnOperation() override {
OwningRewritePatternList patterns(&getContext());
RewritePatternSet patterns(&getContext());
ConversionTarget target(getContext());
target.addIllegalOp<tosa::ConstOp>();
target.addIllegalOp<tosa::SliceOp>();

View File

@ -1482,47 +1482,37 @@ public:
/// Populate the given list with patterns that convert from Vector to LLVM.
void mlir::populateVectorToLLVMConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
LLVMTypeConverter &converter, RewritePatternSet &patterns,
bool reassociateFPReductions, bool enableIndexOptimizations) {
MLIRContext *ctx = converter.getDialect()->getContext();
// clang-format off
patterns.insert<VectorFMAOpNDRewritePattern,
patterns.add<VectorFMAOpNDRewritePattern,
VectorInsertStridedSliceOpDifferentRankRewritePattern,
VectorInsertStridedSliceOpSameRankRewritePattern,
VectorExtractStridedSliceOpConversion>(ctx);
patterns.insert<VectorReductionOpConversion>(
converter, reassociateFPReductions);
patterns.insert<VectorCreateMaskOpConversion,
patterns.add<VectorReductionOpConversion>(converter, reassociateFPReductions);
patterns.add<VectorCreateMaskOpConversion,
VectorTransferConversion<TransferReadOp>,
VectorTransferConversion<TransferWriteOp>>(
converter, enableIndexOptimizations);
patterns
.insert<VectorBitCastOpConversion,
VectorShuffleOpConversion,
VectorExtractElementOpConversion,
VectorExtractOpConversion,
VectorFMAOp1DConversion,
VectorInsertElementOpConversion,
VectorInsertOpConversion,
VectorPrintOpConversion,
.add<VectorBitCastOpConversion, VectorShuffleOpConversion,
VectorExtractElementOpConversion, VectorExtractOpConversion,
VectorFMAOp1DConversion, VectorInsertElementOpConversion,
VectorInsertOpConversion, VectorPrintOpConversion,
VectorTypeCastOpConversion,
VectorLoadStoreConversion<vector::LoadOp,
vector::LoadOpAdaptor>,
VectorLoadStoreConversion<vector::LoadOp, vector::LoadOpAdaptor>,
VectorLoadStoreConversion<vector::MaskedLoadOp,
vector::MaskedLoadOpAdaptor>,
VectorLoadStoreConversion<vector::StoreOp,
vector::StoreOpAdaptor>,
VectorLoadStoreConversion<vector::StoreOp, vector::StoreOpAdaptor>,
VectorLoadStoreConversion<vector::MaskedStoreOp,
vector::MaskedStoreOpAdaptor>,
VectorGatherOpConversion,
VectorScatterOpConversion,
VectorExpandLoadOpConversion,
VectorCompressStoreOpConversion>(converter);
// clang-format on
VectorGatherOpConversion, VectorScatterOpConversion,
VectorExpandLoadOpConversion, VectorCompressStoreOpConversion>(
converter);
}
void mlir::populateVectorToLLVMMatrixConversionPatterns(
LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
patterns.insert<VectorMatmulOpConversion>(converter);
patterns.insert<VectorFlatTransposeOpConversion>(converter);
LLVMTypeConverter &converter, RewritePatternSet &patterns) {
patterns.add<VectorMatmulOpConversion>(converter);
patterns.add<VectorFlatTransposeOpConversion>(converter);
}

Some files were not shown because too many files have changed in this diff Show More