// RUN: mlir-opt %s -test-memref-bound-check -split-input-file -verify-diagnostics | FileCheck %s
// -----
// CHECK-LABEL: func @test() {
func @test() {
  %zero = constant 0 : index
  %minusone = constant -1 : index
  %sym = constant 111 : index
  %A = alloc() : memref<9 x 9 x i32>
  %B = alloc() : memref<111 x i32>
  affine.for %i = -1 to 10 {
    affine.for %j = -1 to 10 {
      %idx0 = affine.apply affine_map<(d0, d1) -> (d0)>(%i, %j)
      %idx1 = affine.apply affine_map<(d0, d1) -> (d1)>(%i, %j)
      // Out of bound access.
      %x = affine.load %A[%idx0, %idx1] : memref<9 x 9 x i32>
      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
      // expected-error@-2 {{'affine.load' op memref out of lower bound access along dimension #1}}
      // expected-error@-3 {{'affine.load' op memref out of upper bound access along dimension #2}}
      // expected-error@-4 {{'affine.load' op memref out of lower bound access along dimension #2}}
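      // For reference, the ranges behind the four diagnostics above: %i and
      // %j each range over [-1, 9], so %idx0 and %idx1 do too, while the
      // valid indices of each 9-element dimension are only [0, 8].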
      // This accesses 0 to 110, so it stays within bounds of memref<111 x i32>.
      %idy = affine.apply affine_map<(d0, d1) -> (10 * d0 - d1 + 19)>(%i, %j)
      %y = affine.load %B[%idy] : memref<111 x i32>
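      // Worked range for %idy with %i, %j in [-1, 9]:
      //   min = 10 * (-1) - 9 + 19 = 0,  max = 10 * 9 - (-1) + 19 = 110.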
    }
  }

  affine.for %k = 0 to 10 {
    // In bound.
    %u = affine.load %B[%zero] : memref<111 x i32>
    // Out of bounds.
    %v = affine.load %B[%sym] : memref<111 x i32> // expected-error {{'affine.load' op memref out of upper bound access along dimension #1}}
    // Out of bounds.
    affine.store %v, %B[%minusone] : memref<111 x i32> // expected-error {{'affine.store' op memref out of lower bound access along dimension #1}}
  }
  return
}

// CHECK-LABEL: func @test_mod_floordiv_ceildiv
func @test_mod_floordiv_ceildiv() {
  %zero = constant 0 : index
  %A = alloc() : memref<128 x 64 x 64 x i32>
  affine.for %i = 0 to 256 {
    affine.for %j = 0 to 256 {
      %idx0 = affine.apply affine_map<(d0, d1, d2) -> (d0 mod 128 + 1)>(%i, %j, %j)
      %idx1 = affine.apply affine_map<(d0, d1, d2) -> (d1 floordiv 4 + 1)>(%i, %j, %j)
      %idx2 = affine.apply affine_map<(d0, d1, d2) -> (d2 ceildiv 4)>(%i, %j, %j)
      %x = affine.load %A[%idx0, %idx1, %idx2] : memref<128 x 64 x 64 x i32>
      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
      // expected-error@-2 {{'affine.load' op memref out of upper bound access along dimension #2}}
      // expected-error@-3 {{'affine.load' op memref out of upper bound access along dimension #3}}
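      // For reference, with %i, %j in [0, 255]: %idx0 = %i mod 128 + 1
      // reaches 128, %idx1 = %j floordiv 4 + 1 reaches 64, and
      // %idx2 = %j ceildiv 4 reaches 64; each is one past the last valid
      // index of its dimension.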
      %idy0 = affine.apply affine_map<(d0, d1, d2) -> (d0 mod 128)>(%i, %j, %j)
      %idy1 = affine.apply affine_map<(d0, d1, d2) -> (d1 floordiv 4)>(%i, %j, %j)
      %idy2 = affine.apply affine_map<(d0, d1, d2) -> (d2 ceildiv 4 - 1)>(%i, %j, %j)
      affine.store %x, %A[%idy0, %idy1, %idy2] : memref<128 x 64 x 64 x i32> // expected-error {{'affine.store' op memref out of lower bound access along dimension #3}}
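      // At %j = 0, %idy2 = 0 ceildiv 4 - 1 = -1, below the lower bound of
      // dimension #3.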
    } // CHECK: }
  } // CHECK: }
  return
}

// CHECK-LABEL: func @test_no_out_of_bounds()
func @test_no_out_of_bounds() {
  %zero = constant 0 : index
  %A = alloc() : memref<257 x 256 x i32>
  %C = alloc() : memref<257 x i32>
  %B = alloc() : memref<1 x i32>
  affine.for %i = 0 to 256 {
    affine.for %j = 0 to 256 {
      // All of these accesses are in bound; check that no errors are emitted.
      // CHECK: %{{.*}} = affine.apply {{#map.*}}(%{{.*}}, %{{.*}})
      // CHECK-NEXT: %{{.*}} = affine.load %{{.*}}[%{{.*}}, %{{.*}}] : memref<257x256xi32>
      // CHECK-NEXT: %{{.*}} = affine.apply {{#map.*}}(%{{.*}}, %{{.*}})
      // CHECK-NEXT: %{{.*}} = affine.load %{{.*}}[%{{.*}}] : memref<1xi32>
      %idx0 = affine.apply affine_map<(d0, d1) -> (64 * (d0 ceildiv 64))>(%i, %j)
      // Without GCDTightenInequalities(), the upper bound on the region
      // accessed along the first memref dimension would have come out as
      // d0 <= 318 (instead of d0 <= 256), and led to a false positive out
      // of bounds.
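      // For reference: with %i in [0, 255], 64 * (%i ceildiv 64) takes only
      // the values {0, 64, 128, 192, 256}, all valid for a dimension of
      // size 257.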
      %x = affine.load %A[%idx0, %zero] : memref<257 x 256 x i32>
      %idy = affine.apply affine_map<(d0, d1) -> (d0 floordiv 256)>(%i, %i)
      %y = affine.load %B[%idy] : memref<1 x i32>
    } // CHECK-NEXT: }
  }
  return
}

// CHECK-LABEL: func @mod_div
func @mod_div() {
  %zero = constant 0 : index
  %A = alloc() : memref<128 x 64 x 64 x i32>
  affine.for %i = 0 to 256 {
    affine.for %j = 0 to 256 {
      %idx0 = affine.apply affine_map<(d0, d1, d2) -> (d0 mod 128 + 1)>(%i, %j, %j)
      %idx1 = affine.apply affine_map<(d0, d1, d2) -> (d1 floordiv 4 + 1)>(%i, %j, %j)
      %idx2 = affine.apply affine_map<(d0, d1, d2) -> (d2 ceildiv 4)>(%i, %j, %j)
      %x = affine.load %A[%idx0, %idx1, %idx2] : memref<128 x 64 x 64 x i32>
      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
      // expected-error@-2 {{'affine.load' op memref out of upper bound access along dimension #2}}
      // expected-error@-3 {{'affine.load' op memref out of upper bound access along dimension #3}}
      %idy0 = affine.apply affine_map<(d0, d1, d2) -> (d0 mod 128)>(%i, %j, %j)
      %idy1 = affine.apply affine_map<(d0, d1, d2) -> (d1 floordiv 4)>(%i, %j, %j)
      %idy2 = affine.apply affine_map<(d0, d1, d2) -> (d2 ceildiv 4 - 1)>(%i, %j, %j)
      affine.store %x, %A[%idy0, %idy1, %idy2] : memref<128 x 64 x 64 x i32> // expected-error {{'affine.store' op memref out of lower bound access along dimension #3}}
    }
  }
  return
}

// Tests with nested mods and floordivs.
// CHECK-LABEL: func @mod_floordiv_nested() {
func @mod_floordiv_nested() {
  %A = alloc() : memref<256 x 256 x i32>
  affine.for %i = 0 to 256 {
    affine.for %j = 0 to 256 {
      %idx0 = affine.apply affine_map<(d0, d1) -> ((d0 mod 1024) floordiv 4)>(%i, %j)
      %idx1 = affine.apply affine_map<(d0, d1) -> ((((d1 mod 128) mod 32) ceildiv 4) * 32)>(%i, %j)
      affine.load %A[%idx0, %idx1] : memref<256 x 256 x i32> // expected-error {{'affine.load' op memref out of upper bound access along dimension #2}}
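      // For reference: ((%j mod 128) mod 32) ceildiv 4 reaches 8, so %idx1
      // reaches 8 * 32 = 256, one past the last valid index (255) of
      // dimension #2.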
    }
  }
  return
}

// CHECK-LABEL: func @test_semi_affine_bailout
func @test_semi_affine_bailout(%N : index) {
  %B = alloc() : memref<10 x i32>
  affine.for %i = 0 to 10 {
    %idx = affine.apply affine_map<(d0)[s0] -> (d0 * s0)>(%i)[%N]
    %y = affine.load %B[%idx] : memref<10 x i32>
    // expected-error@-1 {{getMemRefRegion: compose affine map failed}}
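    // (Semi-affine case: d0 * s0 multiplies a dimension by a symbol, which
    // the memref region computation cannot flatten, hence the bailout
    // diagnostic above rather than a bounds verdict.)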
  }
  return
}

// CHECK-LABEL: func @multi_mod_floordiv
func @multi_mod_floordiv() {
  %A = alloc() : memref<2x2xi32>
  affine.for %ii = 0 to 64 {
    %idx0 = affine.apply affine_map<(d0) -> ((d0 mod 147456) floordiv 1152)>(%ii)
    %idx1 = affine.apply affine_map<(d0) -> (((d0 mod 147456) mod 1152) floordiv 384)>(%ii)
    %v = affine.load %A[%idx0, %idx1] : memref<2x2xi32>
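    // For reference: %ii < 64, so %idx0 = %ii floordiv 1152 = 0 and
    // %idx1 = (%ii mod 1152) floordiv 384 = 0; the access is [0, 0] and in
    // bounds.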
  }
  return
}

// CHECK-LABEL: func @delinearize_mod_floordiv
func @delinearize_mod_floordiv() {
  %c0 = constant 0 : index
  %in = alloc() : memref<2x2x3x3x16x1xi32>
  %out = alloc() : memref<64x9xi32>
  // Reshape '%in' into '%out'.
  affine.for %ii = 0 to 64 {
    affine.for %jj = 0 to 9 {
      %a0 = affine.apply affine_map<(d0, d1) -> (d0 * (9 * 1024) + d1 * 128)>(%ii, %jj)
      %a10 = affine.apply affine_map<(d0) ->
          (d0 floordiv (2 * 3 * 3 * 128 * 128))>(%a0)
      %a11 = affine.apply affine_map<(d0) ->
          ((d0 mod 294912) floordiv (3 * 3 * 128 * 128))>(%a0)
      %a12 = affine.apply affine_map<(d0) ->
          ((((d0 mod 294912) mod 147456) floordiv 1152) floordiv 8)>(%a0)
      %a13 = affine.apply affine_map<(d0) ->
          ((((d0 mod 294912) mod 147456) mod 1152) floordiv 384)>(%a0)
      %a14 = affine.apply affine_map<(d0) ->
          (((((d0 mod 294912) mod 147456) mod 1152) mod 384) floordiv 128)>(%a0)
      %a15 = affine.apply affine_map<(d0) ->
          ((((((d0 mod 294912) mod 147456) mod 1152) mod 384) mod 128)
            floordiv 128)>(%a0)
      %v0 = affine.load %in[%a10, %a11, %a13, %a14, %a12, %a15]
          : memref<2x2x3x3x16x1xi32>
    }
  }
  return
}

// CHECK-LABEL: func @zero_d_memref
func @zero_d_memref(%arg0 : memref<i32>) {
  %c0 = constant 0 : i32
  // A 0-d memref always has in-bound accesses!
  affine.store %c0, %arg0[] : memref<i32>
  return
}

// CHECK-LABEL: func @out_of_bounds
func @out_of_bounds() {
  %in = alloc() : memref<1xi32>
  %c9 = constant 9 : i32
  affine.for %i0 = 10 to 11 {
    %idy = affine.apply affine_map<(d0) -> (100 * d0 floordiv 1000)>(%i0)
    affine.store %c9, %in[%idy] : memref<1xi32> // expected-error {{'affine.store' op memref out of upper bound access along dimension #1}}
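    // For reference: %i0 = 10 gives %idy = 1000 floordiv 1000 = 1, past the
    // only valid index (0) of memref<1xi32>.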
  }
  return
}

// -----
// This test case accesses within bounds. Without removal of a certain type of
// trivially redundant constraints (those differing only in their constant
// term), the number of constraints here explodes, and this would return out of
// bounds errors conservatively due to FlatAffineConstraints::kExplosionFactor.
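// For example, of two constraints that differ only in the constant term,
// say d0 <= 10 and d0 <= 12, only the tighter one needs to be kept.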
#map3 = affine_map<(d0, d1) -> ((d0 * 72 + d1) floordiv 2304 + ((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3)>
#map4 = affine_map<(d0, d1) -> ((d0 * 72 + d1) mod 2304 - (((d0 * 72 + d1) mod 2304) floordiv 1152) * 1151 - ((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) * 9 - (((((d0 * 72 + d1) mod 2304) mod 1152) mod 9) floordiv 3) * 3)>
#map5 = affine_map<(d0, d1) -> (((((d0 * 72 + d1) mod 2304) mod 1152) floordiv 9) floordiv 8)>
// CHECK-LABEL: func @test_complex_mod_floordiv
func @test_complex_mod_floordiv(%arg0 : memref<4x4x16x1xf32>) {
  %c0 = constant 0 : index
  %0 = alloc() : memref<1x2x3x3x16x1xf32>
  affine.for %i0 = 0 to 64 {
    affine.for %i1 = 0 to 9 {
      %2 = affine.apply #map3(%i0, %i1)
      %3 = affine.apply #map4(%i0, %i1)
      %4 = affine.apply #map5(%i0, %i1)
      %5 = affine.load %arg0[%2, %c0, %4, %c0] : memref<4x4x16x1xf32>
    }
  }
  return
}

// -----
// The first load is within bounds, but not the second one.
#map0 = affine_map<(d0) -> (d0 mod 4)>
#map1 = affine_map<(d0) -> (d0 mod 4 + 4)>
// CHECK-LABEL: func @test_mod_bound
func @test_mod_bound() {
  %0 = alloc() : memref<7 x f32>
  %1 = alloc() : memref<6 x f32>
  affine.for %i0 = 0 to 4096 {
    affine.for %i1 = #map0(%i0) to #map1(%i0) {
      affine.load %0[%i1] : memref<7 x f32>
      affine.load %1[%i1] : memref<6 x f32>
      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
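      // For reference: %i1 < %i0 mod 4 + 4 <= 7, so %i1 reaches 6, which
      // fits memref<7 x f32> but exceeds the last valid index (5) of
      // memref<6 x f32>.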
    }
  }
  return
}

// -----
#map0 = affine_map<(d0) -> (d0 floordiv 4)>
#map1 = affine_map<(d0) -> (d0 floordiv 4 + 4)>
#map2 = affine_map<(d0) -> (4 * (d0 floordiv 4) + d0 mod 4)>
// CHECK-LABEL: func @test_floordiv_bound
func @test_floordiv_bound() {
  %0 = alloc() : memref<1027 x f32>
  %1 = alloc() : memref<1026 x f32>
  %2 = alloc() : memref<4096 x f32>
  %N = constant 2048 : index
  affine.for %i0 = 0 to 4096 {
    affine.for %i1 = #map0(%i0) to #map1(%i0) {
      affine.load %0[%i1] : memref<1027 x f32>
      affine.load %1[%i1] : memref<1026 x f32>
      // expected-error@-1 {{'affine.load' op memref out of upper bound access along dimension #1}}
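      // For reference: %i0 floordiv 4 reaches 1023 for %i0 in [0, 4095], so
      // %i1 reaches 1026, which fits memref<1027 x f32> but exceeds the last
      // valid index (1025) of memref<1026 x f32>.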
    }
    affine.for %i2 = 0 to #map2(%N) {
      // Within bounds.
      %v = affine.load %2[%i2] : memref<4096 x f32>
    }
  }
  return
}

// -----
// This should not give an out of bounds error. The result of the affine.apply
// is composed into the bound map during analysis.
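// For reference: %i0 ranges over {4, 8, ..., 1024}, so %i1 = %i0 - 4 is at
// most 1020 and %i2 stays below 1024, within memref<1024xf32>.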
#map_lb = affine_map<(d0) -> (d0)>
#map_ub = affine_map<(d0) -> (d0 + 4)>
// CHECK-LABEL: func @non_composed_bound_operand
func @non_composed_bound_operand(%arg0 : memref<1024xf32>) {
  affine.for %i0 = 4 to 1028 step 4 {
    %i1 = affine.apply affine_map<(d0) -> (d0 - 4)>(%i0)
    affine.for %i2 = #map_lb(%i1) to #map_ub(%i1) {
      %0 = affine.load %arg0[%i2] : memref<1024xf32>
    }
  }
  return
}

// CHECK-LABEL: func @zero_d_memref
func @zero_d_memref() {
  %Z = alloc() : memref<f32>
  affine.for %i = 0 to 100 {
    affine.load %Z[] : memref<f32>
  }
  return
}