Refactor the LowerVectorTransfers pass to use the RewritePattern infra - NFC

This is step 1/n in refactoring the infrastructure around the Vector dialect to make it ready for retargetability and composable progressive lowering.

PiperOrigin-RevId: 280529784
Nicolas Vasilache 2019-11-14 15:39:36 -08:00 committed by A. Unique TensorFlower
parent a78bd84cf8
commit 0b271b7dfe
12 changed files with 87 additions and 54 deletions
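In short: the standalone LowerVectorTransfersPass is deleted and its rewrite patterns are exposed through populateVectorToAffineLoopsConversionPatterns, so any pass can compose them with other patterns. A minimal sketch of the new usage, mirroring the TestLowerVectorTransfersPass added at the end of this diff (the pass name here is hypothetical; registration is omitted):

```c++
#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

namespace {
// Hypothetical client pass; any pass can now pull in these patterns.
struct MyVectorLoweringPass : public FunctionPass<MyVectorLoweringPass> {
  void runOnFunction() override {
    OwningRewritePatternList patterns;
    // Collect the VectorTransferRead/WriteOp lowering patterns and apply
    // them greedily until a fixed point is reached.
    populateVectorToAffineLoopsConversionPatterns(&getContext(), patterns);
    applyPatternsGreedily(getFunction(), patterns);
  }
};
} // end anonymous namespace
```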

View File

@@ -1,4 +1,4 @@
//===- VectorToLLVM.h - Pass converting vector to LLVM dialect --*- C++ -*-===//
//===- VectorConversions.h - Utils to convert from the vector dialect -----===//
//
// Copyright 2019 The MLIR Authors.
//
@@ -19,10 +19,17 @@
namespace mlir {
class LLVMTypeConverter;
class MLIRContext;
class ModuleOp;
template <typename T> class OpPassBase;
class OwningRewritePatternList;
template <typename T> class OpPassBase;
/// Collect a set of patterns to convert from the Vector dialect to affine loops
/// surrounding ops in different dialects (vector, std, etc.).
/// This is the general place where we want to implement Vector -> Vector and
/// Vector -> Std legalizations.
void populateVectorToAffineLoopsConversionPatterns(
MLIRContext *context, OwningRewritePatternList &patterns);
/// Collect a set of patterns to convert from the Vector dialect to LLVM.
void populateVectorToLLVMConversionPatterns(LLVMTypeConverter &converter,
@@ -30,6 +37,7 @@ void populateVectorToLLVMConversionPatterns(LLVMTypeConverter &converter,
/// Create a pass to convert vector operations to the LLVMIR dialect.
OpPassBase<ModuleOp> *createLowerVectorToLLVMPass();
} // namespace mlir
#endif // MLIR_CONVERSION_VECTORTOLLVM_VECTORTOLLVM_H_

View File

@@ -122,9 +122,6 @@ std::unique_ptr<OpPassBase<FuncOp>> createAffineDataCopyGenerationPass(
unsigned tagMemorySpace = 0, int minDmaTransferSize = 1024,
uint64_t fastMemCapacityBytes = std::numeric_limits<uint64_t>::max());
/// Creates a pass to lower VectorTransferReadOp and VectorTransferWriteOp.
std::unique_ptr<OpPassBase<FuncOp>> createLowerVectorTransfersPass();
/// Creates a pass to perform optimizations relying on memref dataflow such as
/// store to load forwarding, elimination of dead stores, and dead allocs.
std::unique_ptr<OpPassBase<FuncOp>> createMemRefDataFlowOptPass();

View File

@@ -7,4 +7,4 @@ add_subdirectory(LoopsToGPU)
add_subdirectory(LoopToStandard)
add_subdirectory(StandardToLLVM)
add_subdirectory(StandardToSPIRV)
add_subdirectory(VectorToLLVM)
add_subdirectory(VectorConversions)

View File

@@ -1,8 +1,9 @@
add_llvm_library(MLIRVectorToLLVM
VectorToLLVM.cpp
VectorToLoops.cpp
ADDITIONAL_HEADER_DIRS
${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/VectorToLLVM
${MLIR_MAIN_INCLUDE_DIR}/mlir/Conversion/VectorConversions
)
set(LIBS
MLIRLLVMIR

View File

@@ -1,4 +1,4 @@
//===- LowerToLLVMDialect.cpp - conversion from Linalg to LLVM dialect ----===//
//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Copyright 2019 The MLIR Authors.
//
@@ -15,7 +15,7 @@
// limitations under the License.
// =============================================================================
#include "mlir/Conversion/VectorToLLVM/VectorToLLVM.h"
#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"

View File

@@ -1,4 +1,4 @@
//===- LowerVectorTransfers.cpp - LowerVectorTransfers Pass Impl ----------===//
//===- VectorToLoops.cpp - Conversion from Vector to mix of Loops and Std -===//
//
// Copyright 2019 The MLIR Authors.
//
@@ -21,12 +21,7 @@
#include <type_traits>
#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/NestedMatcher.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Analysis/VectorAnalysis.h"
#include "mlir/Dialect/LoopOps/LoopOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/Dialect/VectorOps/VectorOps.h"
#include "mlir/EDSC/Builders.h"
#include "mlir/EDSC/Helpers.h"
@@ -39,9 +34,12 @@
#include "mlir/IR/OperationSupport.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/Functional.h"
#include "mlir/Transforms/Passes.h"
using namespace mlir;
using vector::VectorTransferReadOp;
using vector::VectorTransferWriteOp;
namespace {
/// Implements lowering of VectorTransferReadOp and VectorTransferWriteOp to a
/// proper abstraction for the hardware.
@@ -58,7 +56,7 @@
/// loop.for %i0 = 0 to %0 {
/// loop.for %i1 = 0 to %1 step %c256 {
/// loop.for %i2 = 0 to %2 step %c32 {
/// %v = vector.transfer_read %A[%i0, %i1, %i2], (%f0)
/// %v = vector.transfer_read %A[%i0, %i1, %i2], %f0
/// {permutation_map: (d0, d1, d2) -> (d2, d1)} :
/// memref<?x?x?xf32>, vector<32x256xf32>
/// }}}
@@ -81,17 +79,16 @@
/// which creates individual indexing expressions of the form:
///
/// ```mlir-dsc
/// SELECT(i + ii < zero, zero, SELECT(i + ii < N, i + ii, N - one))
/// auto condMax = i + ii < N;
/// auto max = select(condMax, i + ii, N - one)
/// auto cond = i + ii < zero;
/// select(cond, zero, max);
/// ```
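For intuition: the select chain above clips the access index i + ii into the valid range [0, N - 1]. A scalar C++ equivalent, illustrative only and not part of this commit (the helper name clipIndex is made up):

```c++
#include <algorithm>
#include <cstdint>

// Scalar semantics of select(i + ii < 0, 0, select(i + ii < N, i + ii, N - 1)):
// clamp the index into the valid range [0, N - 1].
int64_t clipIndex(int64_t i, int64_t ii, int64_t N) {
  return std::clamp<int64_t>(i + ii, 0, N - 1);
}
```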
using namespace mlir;
using vector::VectorTransferReadOp;
using vector::VectorTransferWriteOp;
#define DEBUG_TYPE "affine-lower-vector-transfers"
namespace {
///
/// In the future, clipping should not be the only option; instead, we should
/// load vectors and mask them. Similarly, on the write side, a load/mask/store
/// sequence would implement the RMW behavior.
///
/// Lowers VectorTransferOp into a combination of:
/// 1. local memory allocation;
/// 2. perfect loop nest over:
@@ -359,26 +356,12 @@ VectorTransferRewriter<VectorTransferWriteOp>::matchAndRewrite(
rewriter.eraseOp(op);
return matchSuccess();
}
} // namespace
struct LowerVectorTransfersPass
: public FunctionPass<LowerVectorTransfersPass> {
void runOnFunction() override {
OwningRewritePatternList patterns;
auto *context = &getContext();
patterns.insert<VectorTransferRewriter<vector::VectorTransferReadOp>,
VectorTransferRewriter<vector::VectorTransferWriteOp>>(
context);
applyPatternsGreedily(getFunction(), patterns);
}
};
} // end anonymous namespace
std::unique_ptr<OpPassBase<FuncOp>> mlir::createLowerVectorTransfersPass() {
return std::make_unique<LowerVectorTransfersPass>();
/// Populate the given list with patterns that convert from Vector to affine loops.
void mlir::populateVectorToAffineLoopsConversionPatterns(
MLIRContext *context, OwningRewritePatternList &patterns) {
patterns.insert<VectorTransferRewriter<vector::VectorTransferReadOp>,
VectorTransferRewriter<vector::VectorTransferWriteOp>>(
context);
}
static PassRegistration<LowerVectorTransfersPass>
pass("affine-lower-vector-transfers",
"Materializes vector transfer ops to a "
"proper abstraction for the hardware");

View File

@@ -19,7 +19,7 @@
#include "mlir/Conversion/LoopToStandard/ConvertLoopToStandard.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Conversion/VectorToLLVM/VectorToLLVM.h"
#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"

View File

@@ -13,7 +13,6 @@ add_llvm_library(MLIRTransforms
LoopTiling.cpp
LoopUnrollAndJam.cpp
LoopUnroll.cpp
LowerVectorTransfers.cpp
MaterializeVectors.cpp
MemRefDataFlowOpt.cpp
PipelineDataTransfer.cpp

View File

@@ -1,4 +1,4 @@
// RUN: mlir-opt %s -affine-lower-vector-transfers | FileCheck %s
// RUN: mlir-opt %s -test-affine-lower-vector-transfers | FileCheck %s
// CHECK: #[[ADD:map[0-9]+]] = (d0, d1) -> (d0 + d1)
// CHECK: #[[SUB:map[0-9]+]] = ()[s0] -> (s0 - 1)

View File

@@ -5,6 +5,7 @@ add_llvm_library(MLIRTestTransforms
TestInlining.cpp
TestLoopMapping.cpp
TestLoopParametricTiling.cpp
TestLowerVectorTransfers.cpp
TestOpaqueLoc.cpp
TestMemRefStrideCalculation.cpp
TestVectorizationUtils.cpp

View File

@@ -0,0 +1,44 @@
//===- TestLowerVectorTransfers.cpp - Test VectorTransfers lowering -------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
#include <type_traits>
#include "mlir/Conversion/VectorConversions/VectorConversions.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"
using namespace mlir;
namespace {
struct TestLowerVectorTransfersPass
: public FunctionPass<TestLowerVectorTransfersPass> {
void runOnFunction() override {
OwningRewritePatternList patterns;
auto *context = &getContext();
populateVectorToAffineLoopsConversionPatterns(context, patterns);
applyPatternsGreedily(getFunction(), patterns);
}
};
} // end anonymous namespace
static PassRegistration<TestLowerVectorTransfersPass>
pass("test-affine-lower-vector-transfers",
"Materializes vector transfer ops to a "
"proper abstraction for the hardware");