Introduce memref bound checking.
Introduce analysis to check memref accesses (in MLFunctions) for out-of-bound
ones. It works as follows:
$ mlir-opt -memref-bound-check test/Transforms/memref-bound-check.mlir
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension #1
%x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension #1
%x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of upper bound access along dimension #2
%x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:10:12: error: 'load' op memref out of lower bound access along dimension #2
%x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of upper bound access along dimension #1
%y = load %B[%idy] : memref<128 x i32>
^
/tmp/single.mlir:12:12: error: 'load' op memref out of lower bound access along dimension #1
%y = load %B[%idy] : memref<128 x i32>
^
#map0 = (d0, d1) -> (d0, d1)
#map1 = (d0, d1) -> (d0 * 128 - d1)
mlfunc @test() {
%0 = alloc() : memref<9x9xi32>
%1 = alloc() : memref<128xi32>
for %i0 = -1 to 9 {
for %i1 = -1 to 9 {
%2 = affine_apply #map0(%i0, %i1)
%3 = load %0[%2#0, %2#1] : memref<9x9xi32>
%4 = affine_apply #map1(%i0, %i1)
%5 = load %1[%4] : memref<128xi32>
}
}
return
}
- Improves productivity while manually / semi-automatically developing MLIR for
testing / prototyping; also provides an indirect way to catch errors in
transformations.
- This pass is an easy way to test the underlying affine analysis
machinery, including low-level routines.
Some code (in getMemoryRegion()) borrowed from @andydavis cl/218263256.
While on this:
- create mlir/Analysis/Passes.h; move Pass.h up from mlir/Transforms/ to mlir/
- fix a bug in AffineAnalysis.cpp::toAffineExpr
TODO: extend to non-constant loop bounds (straightforward). Will transparently
work for all accesses once floordiv, mod, ceildiv are supported in the
AffineMap -> FlatAffineConstraints conversion.
PiperOrigin-RevId: 219397961
// RUN: mlir-opt %s -memref-bound-check -split-input-file -verify | FileCheck %s

// -----

// CHECK-LABEL: mlfunc @test() {
// Checks that out-of-bounds loads along each dimension are diagnosed for
// both affine_apply-derived indices and plain constant indices.
mlfunc @test() {
  %zero = constant 0 : index
  %minusone = constant -1 : index
  %sym = constant 111 : index

  %A = alloc() : memref<9 x 9 x i32>
  %B = alloc() : memref<111 x i32>

  for %i = -1 to 10 {
    for %j = -1 to 10 {
      %idx = affine_apply (d0, d1) -> (d0, d1)(%i, %j)
      // Out of bound access.
      %x = load %A[%idx#0, %idx#1] : memref<9 x 9 x i32>
      // expected-error@-1 {{'load' op memref out of upper bound access along dimension #1}}
      // expected-error@-2 {{'load' op memref out of lower bound access along dimension #1}}
      // expected-error@-3 {{'load' op memref out of upper bound access along dimension #2}}
      // expected-error@-4 {{'load' op memref out of lower bound access along dimension #2}}
      // This will access 0 to 110 - hence an overflow.
      %idy = affine_apply (d0, d1) -> (10*d0 - d1 + 19)(%i, %j)
      %y = load %B[%idy] : memref<111 x i32>
    }
  }

  for %k = 0 to 10 {
    // In bound.
    %u = load %B[%zero] : memref<111 x i32>
    // Out of bounds.
    %v = load %B[%sym] : memref<111 x i32> // expected-error {{'load' op memref out of upper bound access along dimension #1}}
    // Out of bounds.
    %w = load %B[%minusone] : memref<111 x i32> // expected-error {{'load' op memref out of lower bound access along dimension #1}}
  }
  return
}

// CHECK-LABEL: mlfunc @test_
// Checks bound diagnostics for accesses whose indices involve mod, floordiv,
// and ceildiv in the affine map.
mlfunc @test_mod_floordiv_ceildiv() {
  %zero = constant 0 : index
  %A = alloc() : memref<128 x 64 x 64 x i32>

  for %i = 0 to 256 {
    for %j = 0 to 256 {
      // The "+ 1" offsets push each dimension one past its upper bound.
      %idx = affine_apply (d0, d1, d2) -> (d0 mod 128 + 1, d1 floordiv 4 + 1, d2 ceildiv 4)(%i, %j, %j)
      %x = load %A[%idx#0, %idx#1, %idx#2] : memref<128 x 64 x 64 x i32>
      // expected-error@-1 {{'load' op memref out of upper bound access along dimension #1}}
      // expected-error@-2 {{'load' op memref out of upper bound access along dimension #2}}
      // expected-error@-3 {{'load' op memref out of upper bound access along dimension #3}}
      // "- 1" on the last result makes it reach -1 when d2 is 0.
      %idy = affine_apply (d0, d1, d2) -> (d0 mod 128, d1 floordiv 4, d2 ceildiv 4 - 1)(%i, %j, %j)
      %y = load %A[%idy#0, %idy#1, %idy#2] : memref<128 x 64 x 64 x i32> // expected-error {{'load' op memref out of lower bound access along dimension #3}}
      // CHECK-EMPTY
    } // CHECK }
  } // CHECK }
  return
}

// CHECK-LABEL: mlfunc @test_no_out_of_bounds()
// Negative test: all accesses are in bound, so no diagnostics may be emitted
// (the CHECK lines verify the IR passes through unchanged).
mlfunc @test_no_out_of_bounds() {
  %zero = constant 0 : index
  %A = alloc() : memref<257 x 256 x i32>
  %C = alloc() : memref<257 x i32>
  %B = alloc() : memref<1 x i32>

  for %i = 0 to 256 {
    for %j = 0 to 256 {
      // All of these accesses are in bound; check that no errors are emitted.
      // CHECK: %3 = affine_apply #map4(%i0, %i1)
      // CHECK-NEXT: %4 = load %0[%3#0, %c0] : memref<257x256xi32>
      // CHECK-NEXT: %5 = affine_apply #map5(%i0, %i0)
      // CHECK-NEXT: %6 = load %2[%5] : memref<1xi32>
      %idx = affine_apply (d0, d1) -> ( 64 * (d0 ceildiv 64), d1 floordiv 4 + d1 mod 4)(%i, %j)
      // Without GCDTightenInequalities(), the upper bound on the region
      // accessed along first memref dimension would have come out as d0 <= 318
      // (instead of d0 <= 256), and led to a false positive out of bounds.
      %x = load %A[%idx#0, %zero] : memref<257 x 256 x i32>
      %idy = affine_apply (d0, d1) -> (d0 floordiv 256)(%i, %i)
      %y = load %B[%idy] : memref<1 x i32>
    } // CHECK-NEXT }
  }
  return
}
|