//===- Canonicalizer.cpp - Canonicalize MLIR operations -------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
//
// This transformation pass converts operations into their canonical forms by
// folding constants, applying operation identity transformations etc.
//
//===----------------------------------------------------------------------===//
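
// As a small illustration of the kind of rewrite this pass performs (the IR
// below is only a sketch; exact op names and syntax depend on the dialect and
// MLIR revision), constant folding can turn
//
//   %0 = constant 1 : i32
//   %1 = constant 2 : i32
//   %2 = addi %0, %1 : i32
//
// into a single constant-producing operation, provided the ops involved
// supply folding/canonicalization patterns.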

#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

//===----------------------------------------------------------------------===//
// The actual Canonicalizer Pass.
//===----------------------------------------------------------------------===//

namespace {

/// Canonicalize operations in functions.
struct Canonicalizer : public FunctionPass {
  Canonicalizer() : FunctionPass(&Canonicalizer::passID) {}

  PassResult runOnFunction(Function *fn) override;

  static char passID;
};
} // end anonymous namespace
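
// The address of passID serves as a unique identifier for this pass; it is
// what the FunctionPass constructor above is given.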

char Canonicalizer::passID = 0;

PassResult Canonicalizer::runOnFunction(Function *fn) {
  auto *context = fn->getContext();
  OwningRewritePatternList patterns;

  // TODO: Instead of adding all known patterns from the whole system, lazily
  // add and cache the canonicalization patterns for ops we see in practice
  // when building the worklist. For now, we just grab everything.
  for (auto *op : fn->getContext()->getRegisteredOperations())
    op->getCanonicalizationPatterns(patterns, context);
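
  // Apply the collected patterns using the greedy pattern rewrite driver,
  // which keeps applying matching patterns (worklist-driven) until no more
  // patterns apply.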
  applyPatternsGreedily(fn, std::move(patterns));
  return success();
}

/// Create a Canonicalizer pass.
FunctionPass *mlir::createCanonicalizerPass() { return new Canonicalizer(); }
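
// A minimal usage sketch (`someFunction` is a placeholder for an existing
// Function *; exact pass-driver plumbing varies by revision):
//
//   FunctionPass *canonicalizer = mlir::createCanonicalizerPass();
//   PassResult result = canonicalizer->runOnFunction(someFunction);
//   delete canonicalizer;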

static PassRegistration<Canonicalizer> pass("canonicalize",
                                            "Canonicalize operations");
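
// With the registration above, the pass can be exercised from the mlir-opt
// driver, e.g.:
//
//   mlir-opt -canonicalize input.mlir
//
// (input.mlir is a placeholder; the exact driver invocation may vary across
// revisions.)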