2018-06-22 00:49:33 +08:00
|
|
|
//===- mlir-opt.cpp - MLIR Optimizer Driver -------------------------------===//
//
// Copyright 2019 The MLIR Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// =============================================================================
//
// This is a command line utility that parses an MLIR file, runs an optimization
// pass, then prints the result back out. It is designed to support unit
// testing.
//
//===----------------------------------------------------------------------===//
|
|
|
|
|
Introduce memref bound checking.
Introduce analysis to check memref accesses (in MLFunctions) for out of bound
ones. It works as follows:
$ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
#map0 = (d0, d1) -> (d0, d1)
#map1 = (d0, d1) -> (d0 * 128 - d1)
mlfunc @test() {
%0 = alloc() : memref<9x9xi32>
%1 = alloc() : memref<128xi32>
for %i0 = -1 to 9 {
for %i1 = -1 to 9 {
%2 = affine_apply #map0(%i0, %i1)
%3 = load %0[%2tensorflow/mlir#0, %2tensorflow/mlir#1] : memref<9x9xi32>
%4 = affine_apply #map1(%i0, %i1)
%5 = load %1[%4] : memref<128xi32>
}
}
return
}
- Improves productivity while manually / semi-automatically developing MLIR for
testing / prototyping; also provides an indirect way to catch errors in
transformations.
- This pass is an easy way to test the underlying affine analysis
machinery including low level routines.
Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256.
While on this:
- create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/
- fix a bug in AffineAnalysis.cpp::toAffineExpr
TODO: extend to non-constant loop bounds (straightforward). Will transparently
work for all accesses once floordiv, mod, ceildiv are supported in the
AffineMap -> FlatAffineConstraints conversion.
PiperOrigin-RevId: 219397961
2018-10-31 08:43:06 +08:00
|
|
|
#include "mlir/Analysis/Passes.h"
|
2018-08-06 12:12:29 +08:00
|
|
|
#include "mlir/IR/Attributes.h"
|
2018-09-05 13:04:21 +08:00
|
|
|
#include "mlir/IR/CFGFunction.h"
|
2018-09-03 13:01:45 +08:00
|
|
|
#include "mlir/IR/Location.h"
|
2018-07-25 11:01:16 +08:00
|
|
|
#include "mlir/IR/MLFunction.h"
|
2018-06-23 13:03:48 +08:00
|
|
|
#include "mlir/IR/MLIRContext.h"
|
2018-06-22 06:22:42 +08:00
|
|
|
#include "mlir/IR/Module.h"
|
2018-06-23 01:39:19 +08:00
|
|
|
#include "mlir/Parser.h"
|
Introduce memref bound checking.
Introduce analysis to check memref accesses (in MLFunctions) for out of bound
ones. It works as follows:
$ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
#map0 = (d0, d1) -> (d0, d1)
#map1 = (d0, d1) -> (d0 * 128 - d1)
mlfunc @test() {
%0 = alloc() : memref<9x9xi32>
%1 = alloc() : memref<128xi32>
for %i0 = -1 to 9 {
for %i1 = -1 to 9 {
%2 = affine_apply #map0(%i0, %i1)
%3 = load %0[%2tensorflow/mlir#0, %2tensorflow/mlir#1] : memref<9x9xi32>
%4 = affine_apply #map1(%i0, %i1)
%5 = load %1[%4] : memref<128xi32>
}
}
return
}
- Improves productivity while manually / semi-automatically developing MLIR for
testing / prototyping; also provides an indirect way to catch errors in
transformations.
- This pass is an easy way to test the underlying affine analysis
machinery including low level routines.
Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256.
While on this:
- create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/
- fix a bug in AffineAnalysis.cpp::toAffineExpr
TODO: extend to non-constant loop bounds (straightforward). Will transparently
work for all accesses once floordiv, mod, ceildiv are supported in the
AffineMap -> FlatAffineConstraints conversion.
PiperOrigin-RevId: 219397961
2018-10-31 08:43:06 +08:00
|
|
|
#include "mlir/Pass.h"
|
2018-07-30 03:37:35 +08:00
|
|
|
#include "mlir/TensorFlow/ControlFlowOps.h"
|
|
|
|
#include "mlir/TensorFlow/Passes.h"
|
2018-09-05 13:04:21 +08:00
|
|
|
#include "mlir/Transforms/CFGFunctionViewGraph.h"
|
2018-07-30 03:37:35 +08:00
|
|
|
#include "mlir/Transforms/Passes.h"
|
2018-09-26 03:06:41 +08:00
|
|
|
#include "mlir/XLA/Passes.h"
|
2018-06-22 00:49:33 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2018-06-22 06:22:42 +08:00
|
|
|
#include "llvm/Support/FileUtilities.h"
|
2018-06-22 00:49:33 +08:00
|
|
|
#include "llvm/Support/InitLLVM.h"
|
2018-08-06 12:12:29 +08:00
|
|
|
#include "llvm/Support/PrettyStackTrace.h"
|
2018-06-25 23:10:46 +08:00
|
|
|
#include "llvm/Support/Regex.h"
|
2018-07-08 10:12:22 +08:00
|
|
|
#include "llvm/Support/SourceMgr.h"
|
2018-06-22 06:22:42 +08:00
|
|
|
#include "llvm/Support/ToolOutputFile.h"
|
2018-08-31 08:35:15 +08:00
|
|
|
|
2018-06-22 00:49:33 +08:00
|
|
|
using namespace mlir;
|
2018-06-22 06:22:42 +08:00
|
|
|
using namespace llvm;
|
2018-09-03 13:01:45 +08:00
|
|
|
using llvm::SMLoc;
|
2018-06-22 06:22:42 +08:00
|
|
|
|
|
|
|
/// Path of the input .mlir file; "-" (the default) reads from stdin.
static cl::opt<std::string>
    inputFilename(cl::Positional, cl::desc("<input file>"), cl::init("-"));

/// Path of the output file; "-" (the default) writes to stdout.
static cl::opt<std::string>
    outputFilename("o", cl::desc("Output filename"), cl::value_desc("filename"),
                   cl::init("-"));
|
|
|
|
|
2018-06-25 00:10:36 +08:00
|
|
|
/// When set, the input file is split on a marker and each chunk is processed
/// as an independent buffer (used to pack many test cases into one file).
static cl::opt<bool>
    splitInputFile("split-input-file",
                   cl::desc("Split the input file into pieces and process each "
                            "chunk independently"),
                   cl::init(false));

/// When set, diagnostics are not printed; instead they are matched against
/// "expected-*" annotations in the input, FileCheck-style.
static cl::opt<bool>
    verifyDiagnostics("verify",
                      cl::desc("Check that emitted diagnostics match "
                               "expected-* lines on the corresponding line"),
                      cl::init(false));
|
2018-06-22 06:22:42 +08:00
|
|
|
|
2018-07-30 05:13:03 +08:00
|
|
|
/// The set of passes selectable on the command line. Each enumerator maps to
/// one clEnumValN entry in `passList` below and to one `create*Pass()` call in
/// the pass-dispatch switch.
enum Passes {
  Canonicalize,
  ComposeAffineMaps,
  ConstantFold,
  ConvertToCFG,
  MemRefBoundCheck,
  MemRefDependenceCheck,
  LoopFusion,
  LoopUnroll,
  LoopUnrollAndJam,
  PipelineDataTransfer,
  PrintCFGGraph,
  SimplifyAffineStructures,
  TFRaiseControlFlow,
  Vectorize,
  XLALower,
};
|
|
|
|
|
|
|
|
static cl::list<Passes> passList(
|
|
|
|
"", cl::desc("Compiler passes to run"),
|
2018-10-12 08:21:55 +08:00
|
|
|
cl::values(
|
|
|
|
clEnumValN(Canonicalize, "canonicalize", "Canonicalize operations"),
|
|
|
|
clEnumValN(ComposeAffineMaps, "compose-affine-maps",
|
|
|
|
"Compose affine maps"),
|
|
|
|
clEnumValN(ConstantFold, "constant-fold",
|
|
|
|
"Constant fold operations in functions"),
|
|
|
|
clEnumValN(ConvertToCFG, "convert-to-cfg",
|
|
|
|
"Convert all ML functions in the module to CFG ones"),
|
Introduce memref bound checking.
Introduce analysis to check memref accesses (in MLFunctions) for out of bound
ones. It works as follows:
$ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
#map0 = (d0, d1) -> (d0, d1)
#map1 = (d0, d1) -> (d0 * 128 - d1)
mlfunc @test() {
%0 = alloc() : memref<9x9xi32>
%1 = alloc() : memref<128xi32>
for %i0 = -1 to 9 {
for %i1 = -1 to 9 {
%2 = affine_apply #map0(%i0, %i1)
%3 = load %0[%2tensorflow/mlir#0, %2tensorflow/mlir#1] : memref<9x9xi32>
%4 = affine_apply #map1(%i0, %i1)
%5 = load %1[%4] : memref<128xi32>
}
}
return
}
- Improves productivity while manually / semi-automatically developing MLIR for
testing / prototyping; also provides an indirect way to catch errors in
transformations.
- This pass is an easy way to test the underlying affine analysis
machinery including low level routines.
Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256.
While on this:
- create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/
- fix a bug in AffineAnalysis.cpp::toAffineExpr
TODO: extend to non-constant loop bounds (straightforward). Will transparently
work for all accesses once floordiv, mod, ceildiv are supported in the
AffineMap -> FlatAffineConstraints conversion.
PiperOrigin-RevId: 219397961
2018-10-31 08:43:06 +08:00
|
|
|
clEnumValN(MemRefBoundCheck, "memref-bound-check",
|
|
|
|
"Convert all ML functions in the module to CFG ones"),
|
2018-11-03 00:55:04 +08:00
|
|
|
clEnumValN(MemRefDependenceCheck, "memref-dependence-check",
|
|
|
|
"Checks dependences between all pairs of memref accesses."),
|
2018-11-01 22:26:00 +08:00
|
|
|
clEnumValN(LoopFusion, "loop-fusion", "Fuse loop nests"),
|
2018-10-12 08:21:55 +08:00
|
|
|
clEnumValN(LoopUnroll, "loop-unroll", "Unroll loops"),
|
|
|
|
clEnumValN(LoopUnrollAndJam, "loop-unroll-jam", "Unroll and jam loops"),
|
|
|
|
clEnumValN(PipelineDataTransfer, "pipeline-data-transfer",
|
|
|
|
"Pipeline non-blocking data transfers between"
|
|
|
|
"explicitly managed levels of the memory hierarchy"),
|
|
|
|
clEnumValN(PrintCFGGraph, "print-cfg-graph",
|
|
|
|
"Print CFG graph per function"),
|
2018-10-25 23:33:02 +08:00
|
|
|
clEnumValN(SimplifyAffineStructures, "simplify-affine-structures",
|
2018-10-12 08:21:55 +08:00
|
|
|
"Simplify affine expressions"),
|
|
|
|
clEnumValN(TFRaiseControlFlow, "tf-raise-control-flow",
|
|
|
|
"Dynamic TensorFlow Switch/Match nodes to a CFG"),
|
Introduce memref bound checking.
Introduce analysis to check memref accesses (in MLFunctions) for out of bound
ones. It works as follows:
$ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
#map0 = (d0, d1) -> (d0, d1)
#map1 = (d0, d1) -> (d0 * 128 - d1)
mlfunc @test() {
%0 = alloc() : memref<9x9xi32>
%1 = alloc() : memref<128xi32>
for %i0 = -1 to 9 {
for %i1 = -1 to 9 {
%2 = affine_apply #map0(%i0, %i1)
%3 = load %0[%2tensorflow/mlir#0, %2tensorflow/mlir#1] : memref<9x9xi32>
%4 = affine_apply #map1(%i0, %i1)
%5 = load %1[%4] : memref<128xi32>
}
}
return
}
- Improves productivity while manually / semi-automatically developing MLIR for
testing / prototyping; also provides an indirect way to catch errors in
transformations.
- This pass is an easy way to test the underlying affine analysis
machinery including low level routines.
Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256.
While on this:
- create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/
- fix a bug in AffineAnalysis.cpp::toAffineExpr
TODO: extend to non-constant loop bounds (straightforward). Will transparently
work for all accesses once floordiv, mod, ceildiv are supported in the
AffineMap -> FlatAffineConstraints conversion.
PiperOrigin-RevId: 219397961
2018-10-31 08:43:06 +08:00
|
|
|
clEnumValN(Vectorize, "vectorize",
|
|
|
|
"Vectorize to a target independent n-D vector abstraction."),
|
2018-10-12 08:21:55 +08:00
|
|
|
clEnumValN(XLALower, "xla-lower", "Lower to XLA dialect")));
|
2018-07-30 03:37:35 +08:00
|
|
|
|
2018-07-04 04:24:09 +08:00
|
|
|
/// Result of one driver action; OptSuccess is 0 so it can double as a
/// process exit code.
enum OptResult { OptSuccess, OptFailure };
|
|
|
|
|
2018-06-22 06:22:42 +08:00
|
|
|
/// Open the specified output file and return it, exiting if there is any I/O or
|
|
|
|
/// other errors.
|
|
|
|
/// Open the file named by the -o flag and return it, exiting the process with
/// an error message if it cannot be opened. The caller must invoke keep() on
/// the result for the file to survive on disk.
static std::unique_ptr<ToolOutputFile> getOutputStream() {
  std::error_code error;
  auto result =
      llvm::make_unique<ToolOutputFile>(outputFilename, error, sys::fs::F_None);
  if (error) {
    // A driver tool: report and exit rather than propagate the error.
    llvm::errs() << error.message() << '\n';
    exit(1);
  }

  return result;
}
|
2018-06-22 00:49:33 +08:00
|
|
|
|
2018-09-03 13:01:45 +08:00
|
|
|
/// Given a MemoryBuffer along with a line and column within it, return the
|
|
|
|
/// location being referenced.
|
|
|
|
/// Given a MemoryBuffer along with a 1-based line and column within it, return
/// the location being referenced. Falls back to the start of the buffer when
/// the line/column pair does not fit inside it.
static SMLoc getLocFromLineAndCol(MemoryBuffer &membuf, unsigned lineNo,
                                  unsigned columnNo) {
  // TODO: This should really be upstreamed to be a method on llvm::SourceMgr.
  // Doing so would allow it to use the offset cache that is already maintained
  // by SrcBuffer, making this more efficient.

  // Scan for the correct line number.
  const char *position = membuf.getBufferStart();
  const char *end = membuf.getBufferEnd();

  // We start counting line and column numbers from 1.
  --lineNo;
  --columnNo;

  while (position < end && lineNo) {
    auto curChar = *position++;

    // Scan for newlines.  If this isn't one, ignore it.
    if (curChar != '\r' && curChar != '\n')
      continue;

    // We saw a line break, decrement our counter.
    --lineNo;

    // Check for \r\n and \n\r and treat it as a single escape.  We know that
    // looking past one character is safe because MemoryBuffer's are always nul
    // terminated.
    if (*position != curChar && (*position == '\r' || *position == '\n'))
      ++position;
  }

  // If the line/column counter was invalid, return a pointer to the start of
  // the buffer.
  if (lineNo || position + columnNo > end)
    return SMLoc::getFromPointer(membuf.getBufferStart());

  // Otherwise return the right pointer.
  return SMLoc::getFromPointer(position + columnNo);
}
|
|
|
|
|
|
|
|
/// Perform the actions on the input file indicated by the command line flags
|
|
|
|
/// within the specified context.
|
|
|
|
///
|
|
|
|
/// This typically parses the main source file, runs zero or more optimization
|
|
|
|
/// passes, then prints the output.
|
|
|
|
///
|
|
|
|
static OptResult performActions(SourceMgr &sourceMgr, MLIRContext *context) {
|
|
|
|
std::unique_ptr<Module> module(parseSourceFile(sourceMgr, context));
|
2018-07-04 04:24:09 +08:00
|
|
|
if (!module)
|
|
|
|
return OptFailure;
|
2018-06-25 00:10:36 +08:00
|
|
|
|
2018-07-30 05:13:03 +08:00
|
|
|
// Run each of the passes that were selected.
|
2018-09-07 08:31:21 +08:00
|
|
|
for (unsigned i = 0, e = passList.size(); i != e; ++i) {
|
|
|
|
auto passKind = passList[i];
|
2018-07-30 05:13:03 +08:00
|
|
|
Pass *pass = nullptr;
|
|
|
|
switch (passKind) {
|
2018-10-12 08:21:55 +08:00
|
|
|
case Canonicalize:
|
|
|
|
pass = createCanonicalizerPass();
|
|
|
|
break;
|
2018-10-09 02:10:11 +08:00
|
|
|
case ComposeAffineMaps:
|
|
|
|
pass = createComposeAffineMapsPass();
|
|
|
|
break;
|
2018-09-20 12:35:11 +08:00
|
|
|
case ConstantFold:
|
|
|
|
pass = createConstantFoldPass();
|
|
|
|
break;
|
2018-07-30 05:13:03 +08:00
|
|
|
case ConvertToCFG:
|
|
|
|
pass = createConvertToCFGPass();
|
|
|
|
break;
|
Introduce memref bound checking.
Introduce analysis to check memref accesses (in MLFunctions) for out of bound
ones. It works as follows:
$ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
#map0 = (d0, d1) -> (d0, d1)
#map1 = (d0, d1) -> (d0 * 128 - d1)
mlfunc @test() {
%0 = alloc() : memref<9x9xi32>
%1 = alloc() : memref<128xi32>
for %i0 = -1 to 9 {
for %i1 = -1 to 9 {
%2 = affine_apply #map0(%i0, %i1)
%3 = load %0[%2tensorflow/mlir#0, %2tensorflow/mlir#1] : memref<9x9xi32>
%4 = affine_apply #map1(%i0, %i1)
%5 = load %1[%4] : memref<128xi32>
}
}
return
}
- Improves productivity while manually / semi-automatically developing MLIR for
testing / prototyping; also provides an indirect way to catch errors in
transformations.
- This pass is an easy way to test the underlying affine analysis
machinery including low level routines.
Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256.
While on this:
- create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/
- fix a bug in AffineAnalysis.cpp::toAffineExpr
TODO: extend to non-constant loop bounds (straightforward). Will transparently
work for all accesses once floordiv, mod, ceildiv are supported in the
AffineMap -> FlatAffineConstraints conversion.
PiperOrigin-RevId: 219397961
2018-10-31 08:43:06 +08:00
|
|
|
case MemRefBoundCheck:
|
|
|
|
pass = createMemRefBoundCheckPass();
|
2018-11-03 00:55:04 +08:00
|
|
|
break;
|
|
|
|
case MemRefDependenceCheck:
|
|
|
|
pass = createMemRefDependenceCheckPass();
|
2018-10-18 09:01:44 +08:00
|
|
|
break;
|
2018-11-01 22:26:00 +08:00
|
|
|
case LoopFusion:
|
|
|
|
pass = createLoopFusionPass();
|
|
|
|
break;
|
Extend loop unrolling to unroll by a given factor; add builder for affine
apply op.
- add builder for AffineApplyOp (first one for an operation that has
non-zero operands)
- add support for loop unrolling by a given factor; uses the affine apply op
builder.
While on this, change 'step' of ForStmt to be 'unsigned' instead of
AffineConstantExpr *. Add setters for ForStmt lb, ub, step.
Sample Input:
// CHECK-LABEL: mlfunc @loop_nest_unroll_cleanup() {
mlfunc @loop_nest_unroll_cleanup() {
for %i = 1 to 100 {
for %j = 0 to 17 {
%x = "addi32"(%j, %j) : (affineint, affineint) -> i32
%y = "addi32"(%x, %x) : (i32, i32) -> i32
}
}
return
}
Output:
$ mlir-opt -loop-unroll -unroll-factor=4 /tmp/single2.mlir
#map0 = (d0) -> (d0 + 1)
#map1 = (d0) -> (d0 + 2)
#map2 = (d0) -> (d0 + 3)
mlfunc @loop_nest_unroll_cleanup() {
for %i0 = 1 to 100 {
for %i1 = 0 to 17 step 4 {
%0 = "addi32"(%i1, %i1) : (affineint, affineint) -> i32
%1 = "addi32"(%0, %0) : (i32, i32) -> i32
%2 = affine_apply #map0(%i1)
%3 = "addi32"(%2, %2) : (affineint, affineint) -> i32
%4 = affine_apply #map1(%i1)
%5 = "addi32"(%4, %4) : (affineint, affineint) -> i32
%6 = affine_apply #map2(%i1)
%7 = "addi32"(%6, %6) : (affineint, affineint) -> i32
}
for %i2 = 16 to 17 {
%8 = "addi32"(%i2, %i2) : (affineint, affineint) -> i32
%9 = "addi32"(%8, %8) : (i32, i32) -> i32
}
}
return
}
PiperOrigin-RevId: 209676220
2018-08-22 07:01:23 +08:00
|
|
|
case LoopUnroll:
|
2018-08-29 09:24:27 +08:00
|
|
|
pass = createLoopUnrollPass();
|
|
|
|
break;
|
|
|
|
case LoopUnrollAndJam:
|
|
|
|
pass = createLoopUnrollAndJamPass();
|
2018-08-07 09:40:34 +08:00
|
|
|
break;
|
2018-09-29 03:17:26 +08:00
|
|
|
case PipelineDataTransfer:
|
|
|
|
pass = createPipelineDataTransferPass();
|
|
|
|
break;
|
2018-09-05 13:04:21 +08:00
|
|
|
case PrintCFGGraph:
|
|
|
|
pass = createPrintCFGGraphPass();
|
|
|
|
break;
|
2018-10-25 23:33:02 +08:00
|
|
|
case SimplifyAffineStructures:
|
|
|
|
pass = createSimplifyAffineStructuresPass();
|
2018-08-31 08:35:15 +08:00
|
|
|
break;
|
2018-07-30 05:13:03 +08:00
|
|
|
case TFRaiseControlFlow:
|
|
|
|
pass = createRaiseTFControlFlowPass();
|
|
|
|
break;
|
Introduce memref bound checking.
Introduce analysis to check memref accesses (in MLFunctions) for out of bound
ones. It works as follows:
$ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#2
%x = load %A[%idxtensorflow/mlir#0, %idxtensorflow/mlir#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension tensorflow/mlir#1
%y = load %B[%idy] : memref<128 x i32>
^
#map0 = (d0, d1) -> (d0, d1)
#map1 = (d0, d1) -> (d0 * 128 - d1)
mlfunc @test() {
%0 = alloc() : memref<9x9xi32>
%1 = alloc() : memref<128xi32>
for %i0 = -1 to 9 {
for %i1 = -1 to 9 {
%2 = affine_apply #map0(%i0, %i1)
%3 = load %0[%2tensorflow/mlir#0, %2tensorflow/mlir#1] : memref<9x9xi32>
%4 = affine_apply #map1(%i0, %i1)
%5 = load %1[%4] : memref<128xi32>
}
}
return
}
- Improves productivity while manually / semi-automatically developing MLIR for
testing / prototyping; also provides an indirect way to catch errors in
transformations.
- This pass is an easy way to test the underlying affine analysis
machinery including low level routines.
Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256.
While on this:
- create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/
- fix a bug in AffineAnalysis.cpp::toAffineExpr
TODO: extend to non-constant loop bounds (straightforward). Will transparently
work for all accesses once floordiv, mod, ceildiv are supported in the
AffineMap -> FlatAffineConstraints conversion.
PiperOrigin-RevId: 219397961
2018-10-31 08:43:06 +08:00
|
|
|
case Vectorize:
|
|
|
|
pass = createVectorizePass();
|
|
|
|
break;
|
2018-09-26 03:06:41 +08:00
|
|
|
case XLALower:
|
|
|
|
pass = createXLALowerPass();
|
|
|
|
break;
|
2018-07-30 05:13:03 +08:00
|
|
|
}
|
2018-07-30 03:37:35 +08:00
|
|
|
|
2018-09-15 06:59:13 +08:00
|
|
|
PassResult result = pass->runOnModule(module.get());
|
2018-07-30 03:37:35 +08:00
|
|
|
delete pass;
|
2018-09-15 06:59:13 +08:00
|
|
|
if (result)
|
|
|
|
return OptFailure;
|
2018-09-07 00:17:08 +08:00
|
|
|
|
|
|
|
// Verify that the result of the pass is still valid.
|
2018-10-10 06:04:27 +08:00
|
|
|
if (module->verify())
|
|
|
|
return OptFailure;
|
2018-07-25 11:01:16 +08:00
|
|
|
}
|
|
|
|
|
2018-06-25 00:10:36 +08:00
|
|
|
// Print the output.
|
|
|
|
auto output = getOutputStream();
|
|
|
|
module->print(output->os());
|
|
|
|
output->keep();
|
2018-07-04 04:24:09 +08:00
|
|
|
return OptSuccess;
|
2018-06-25 00:10:36 +08:00
|
|
|
}
|
|
|
|
|
2018-09-27 22:43:44 +08:00
|
|
|
/// Given a diagnostic kind, return a human readable string for it.
|
|
|
|
static StringRef getDiagnosticKindString(MLIRContext::DiagnosticKind kind) {
|
|
|
|
switch (kind) {
|
|
|
|
case MLIRContext::DiagnosticKind::Note:
|
|
|
|
return "note";
|
|
|
|
case MLIRContext::DiagnosticKind::Warning:
|
|
|
|
return "warning";
|
|
|
|
case MLIRContext::DiagnosticKind::Error:
|
|
|
|
return "error";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-03 13:01:45 +08:00
|
|
|
/// Parses the memory buffer. If successful, run a series of passes against
/// it and print the result.
///
/// \p ownedBuffer is the input to process; ownership is transferred to the
/// local SourceMgr. Returns OptSuccess when all actions (and, in verify
/// mode, all diagnostic expectations) succeed, OptFailure otherwise.
static OptResult processFile(std::unique_ptr<MemoryBuffer> ownedBuffer) {
  // Tell sourceMgr about this buffer, which is what the parser will pick up.
  SourceMgr sourceMgr;
  // Keep a reference to the raw buffer: the unique_ptr is moved into
  // sourceMgr below, but the lambdas here still need to map diagnostic
  // line/column info back into this buffer's memory.
  auto &buffer = *ownedBuffer;
  sourceMgr.AddNewSourceBuffer(std::move(ownedBuffer), SMLoc());

  // Parse the input file.
  MLIRContext context;

  // If we are in verify mode then we have a lot of work to do, otherwise just
  // perform the actions without worrying about it.
  // NOTE(review): verifyDiagnostics is presumably a cl::opt declared earlier
  // in this file — confirm against the options section.
  if (!verifyDiagnostics) {

    // Register a simple diagnostic handler that prints out info with context.
    context.registerDiagnosticHandler([&](Location *location, StringRef message,
                                          MLIRContext::DiagnosticKind kind) {
      // Default to 1:1 when the location carries no file position.
      unsigned line = 1, column = 1;
      if (auto fileLoc = dyn_cast<FileLineColLoc>(location)) {
        line = fileLoc->getLine();
        column = fileLoc->getColumn();
      }

      auto unexpectedLoc = getLocFromLineAndCol(buffer, line, column);
      sourceMgr.PrintMessage(unexpectedLoc, SourceMgr::DK_Error, message);
    });

    // Run the test actions.
    return performActions(sourceMgr, &context);
  }

  // Keep track of the result of this file processing. If there are no issues,
  // then we succeed.
  auto result = OptSuccess;

  // Record the expected diagnostic's position, substring and whether it was
  // seen.
  struct ExpectedDiag {
    // Severity the test expects (error/warning/note).
    MLIRContext::DiagnosticKind kind;
    // 1-based line the diagnostic must be reported on (after @+N/@-N offset).
    unsigned lineNo;
    // Substring that must appear in the diagnostic message.
    StringRef substring;
    // Location of the "expected-*" marker itself, for reporting mismatches.
    SMLoc fileLoc;
    // Set once a matching diagnostic has been seen.
    bool matched = false;
  };
  SmallVector<ExpectedDiag, 2> expectedDiags;

  // Error checker that verifies reported error was expected.
  // Captures expectedDiags/result by reference and mutates both.
  auto checker = [&](Location *location, StringRef message,
                     MLIRContext::DiagnosticKind kind) {
    unsigned line = 1, column = 1;
    if (auto *fileLoc = dyn_cast<FileLineColLoc>(location)) {
      line = fileLoc->getLine();
      column = fileLoc->getColumn();
    }

    // If we find something that is close then emit a more specific error.
    ExpectedDiag *nearMiss = nullptr;

    // If this was an expected error, remember that we saw it and return.
    for (auto &e : expectedDiags) {
      if (line == e.lineNo && message.contains(e.substring)) {
        if (e.kind == kind) {
          e.matched = true;
          return;
        }

        // If this only differs based on the diagnostic kind, then consider it
        // to be a near miss.
        nearMiss = &e;
      }
    }

    // If there was a near miss, emit a specific diagnostic.
    if (nearMiss) {
      sourceMgr.PrintMessage(nearMiss->fileLoc, SourceMgr::DK_Error,
                             "'" + getDiagnosticKindString(kind) +
                                 "' diagnostic emitted when expecting a '" +
                                 getDiagnosticKindString(nearMiss->kind) + "'");
      result = OptFailure;
      return;
    }

    // If this error wasn't expected, produce an error out of mlir-opt saying
    // so.
    auto unexpectedLoc = getLocFromLineAndCol(buffer, line, column);
    sourceMgr.PrintMessage(unexpectedLoc, SourceMgr::DK_Error,
                           "unexpected error: " + Twine(message));
    result = OptFailure;
  };

  // Scan the file for expected-* designators and register a callback for the
  // error handler.
  // Extract the expected errors from the file.
  // Capture groups: 1 = severity word, 2 = optional "@+N"/"@-N" line offset,
  // 3 = the substring inside the {{...}} braces.
  llvm::Regex expected(
      "expected-(error|note|warning) *(@[+-][0-9]+)? *{{(.*)}}");
  SmallVector<StringRef, 100> lines;
  buffer.getBuffer().split(lines, '\n');
  for (unsigned lineNo = 0, e = lines.size(); lineNo < e; ++lineNo) {
    SmallVector<StringRef, 3> matches;
    if (expected.match(lines[lineNo], &matches)) {
      // Point to the start of expected-*.
      SMLoc expectedStart = SMLoc::getFromPointer(matches[0].data());

      MLIRContext::DiagnosticKind kind;
      if (matches[1] == "error")
        kind = MLIRContext::DiagnosticKind::Error;
      else if (matches[1] == "warning")
        kind = MLIRContext::DiagnosticKind::Warning;
      else {
        // The regex only admits error|note|warning, so this must be a note.
        assert(matches[1] == "note");
        kind = MLIRContext::DiagnosticKind::Note;
      }

      // lineNo is 0-based; diagnostics report 1-based lines.
      ExpectedDiag record{kind, lineNo + 1, matches[3], expectedStart, false};
      auto offsetMatch = matches[2];
      if (!offsetMatch.empty()) {
        int offset;
        // Get the integer value without the @ and +/- prefix.
        if (!offsetMatch.drop_front(2).getAsInteger(0, offset)) {
          if (offsetMatch[1] == '+')
            record.lineNo += offset;
          else
            record.lineNo -= offset;
        }
      }
      expectedDiags.push_back(record);
    }
  }

  // Finally, register the error handler to capture them.
  context.registerDiagnosticHandler(checker);

  // Do any processing requested by command line flags. We don't care whether
  // these actions succeed or fail, we only care what diagnostics they produce
  // and whether they match our expectations.
  performActions(sourceMgr, &context);

  // Verify that all expected errors were seen.
  for (auto &err : expectedDiags) {
    if (!err.matched) {
      // Underline the expected substring at the marker's location.
      SMRange range(err.fileLoc,
                    SMLoc::getFromPointer(err.fileLoc.getPointer() +
                                          err.substring.size()));
      auto kind = getDiagnosticKindString(err.kind);
      sourceMgr.PrintMessage(err.fileLoc, SourceMgr::DK_Error,
                             "expected " + kind + " \"" + err.substring +
                                 "\" was not produced",
                             range);
      result = OptFailure;
    }
  }

  return result;
}
|
|
|
|
|
|
|
|
/// Split the specified file on a marker and process each chunk independently
|
|
|
|
/// according to the normal processFile logic. This is primarily used to
|
|
|
|
/// allow a large number of small independent parser tests to be put into a
|
|
|
|
/// single test, but could be used for other purposes as well.
|
|
|
|
static OptResult
|
|
|
|
splitAndProcessFile(std::unique_ptr<MemoryBuffer> originalBuffer) {
|
|
|
|
const char marker[] = "-----";
|
2018-09-07 11:56:12 +08:00
|
|
|
auto *origMemBuffer = originalBuffer.get();
|
2018-09-03 13:01:45 +08:00
|
|
|
SmallVector<StringRef, 8> sourceBuffers;
|
2018-09-07 11:56:12 +08:00
|
|
|
origMemBuffer->getBuffer().split(sourceBuffers, marker);
|
2018-09-03 13:01:45 +08:00
|
|
|
|
|
|
|
// Add the original buffer to the source manager.
|
|
|
|
SourceMgr fileSourceMgr;
|
|
|
|
fileSourceMgr.AddNewSourceBuffer(std::move(originalBuffer), SMLoc());
|
|
|
|
|
|
|
|
bool hadUnexpectedResult = false;
|
2018-06-26 23:56:55 +08:00
|
|
|
|
2018-09-03 13:01:45 +08:00
|
|
|
// Process each chunk in turn. If any fails, then return a failure of the
|
|
|
|
// tool.
|
|
|
|
for (auto &subBuffer : sourceBuffers) {
|
2018-09-07 11:56:12 +08:00
|
|
|
auto splitLoc = SMLoc::getFromPointer(subBuffer.data());
|
|
|
|
unsigned splitLine = fileSourceMgr.getLineAndColumn(splitLoc).first;
|
|
|
|
auto subMemBuffer = MemoryBuffer::getMemBufferCopy(
|
|
|
|
subBuffer, origMemBuffer->getBufferIdentifier() +
|
|
|
|
Twine(" split at line #") + Twine(splitLine));
|
2018-09-03 13:01:45 +08:00
|
|
|
if (processFile(std::move(subMemBuffer)))
|
|
|
|
hadUnexpectedResult = true;
|
2018-06-25 23:10:46 +08:00
|
|
|
}
|
|
|
|
|
2018-09-03 13:01:45 +08:00
|
|
|
return hadUnexpectedResult ? OptFailure : OptSuccess;
|
2018-06-25 00:10:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int main(int argc, char **argv) {
|
2018-08-06 12:12:29 +08:00
|
|
|
llvm::PrettyStackTraceProgram x(argc, argv);
|
|
|
|
InitLLVM y(argc, argv);
|
2018-06-23 13:03:48 +08:00
|
|
|
|
2018-06-22 06:22:42 +08:00
|
|
|
cl::ParseCommandLineOptions(argc, argv, "MLIR modular optimizer driver\n");
|
2018-06-22 00:49:33 +08:00
|
|
|
|
2018-06-23 01:39:19 +08:00
|
|
|
// Set up the input file.
|
|
|
|
auto fileOrErr = MemoryBuffer::getFileOrSTDIN(inputFilename);
|
|
|
|
if (std::error_code error = fileOrErr.getError()) {
|
|
|
|
llvm::errs() << argv[0] << ": could not open input file '" << inputFilename
|
|
|
|
<< "': " << error.message() << "\n";
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2018-09-03 13:01:45 +08:00
|
|
|
// The split-input-file mode is a very specific mode that slices the file
|
|
|
|
// up into small pieces and checks each independently.
|
|
|
|
if (splitInputFile)
|
|
|
|
return splitAndProcessFile(std::move(*fileOrErr));
|
2018-06-25 23:10:46 +08:00
|
|
|
|
2018-09-03 13:01:45 +08:00
|
|
|
return processFile(std::move(*fileOrErr));
|
2018-06-22 00:49:33 +08:00
|
|
|
}
|