Remove empty AffineExpr stride canonicalization in makeCanonicalStridedLayoutExpr

The "optimization" would replace the AffineMap for an empty shape with a 0 to represent its indexing (stride * dimension) logic. Meanwhile other pieces of core logic (such as getStridesAndOffset and makeStridedLinearLayoutMap) require strides for all dimensions to ensure no aliasing can occur which would occur if the shape was not empty. For now, this optimization is removed as different pieces of core types disagree on this, so the optimization should be caller supplied or should be consistent throughout the infrastructure.

Differential Revision: https://reviews.llvm.org/D130772
Tres Popp 2022-07-29 15:04:29 +02:00
parent facb3ac385
commit 984d1bf8c0
2 changed files with 3 additions and 6 deletions


@@ -988,7 +988,7 @@ AffineExpr mlir::makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
                                                  ArrayRef<AffineExpr> exprs,
                                                  MLIRContext *context) {
   // Size 0 corner case is useful for canonicalizations.
-  if (sizes.empty() || llvm::is_contained(sizes, 0))
+  if (sizes.empty())
     return getAffineConstantExpr(0, context);
 
   assert(!exprs.empty() && "expected exprs");
@@ -1001,9 +1001,6 @@ AffineExpr mlir::makeCanonicalStridedLayoutExpr(ArrayRef<int64_t> sizes,
   int64_t runningSize = 1;
   for (auto en : llvm::zip(llvm::reverse(exprs), llvm::reverse(sizes))) {
     int64_t size = std::get<1>(en);
-    // Degenerate case, no size =-> no stride
-    if (size == 0)
-      continue;
     AffineExpr dimExpr = std::get<0>(en);
     AffineExpr stride = dynamicPoisonBit
                             ? getAffineSymbolExpr(nSymbols++, context)
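For context, the loop above builds the layout as a sum of one (stride * dimension) term per dimension, accumulating the stride as a running product of the sizes to its right. The following is a simplified sketch of that accumulation (my own reduction, assuming static sizes only; the dynamicPoisonBit/symbol path shown above is omitted). With the size == 0 skip removed, a zero-sized dimension now receives a stride term like every other dimension.

// Simplified sketch of the accumulation above (static sizes only; the
// dynamicPoisonBit/symbol handling is omitted). For sizes {s0, s1, s2} this
// produces d0 * (s1 * s2) + d1 * s2 + d2, one stride term per dimension.
#include "mlir/IR/AffineExpr.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

static mlir::AffineExpr stridedExprSketch(llvm::ArrayRef<int64_t> sizes,
                                          llvm::ArrayRef<mlir::AffineExpr> exprs,
                                          mlir::MLIRContext *context) {
  mlir::AffineExpr expr;
  int64_t runningSize = 1;
  for (auto en : llvm::zip(llvm::reverse(exprs), llvm::reverse(sizes))) {
    mlir::AffineExpr dimExpr = std::get<0>(en);
    // The stride of a dimension is the product of all sizes to its right.
    mlir::AffineExpr stride = mlir::getAffineConstantExpr(runningSize, context);
    runningSize *= std::get<1>(en);
    expr = expr ? expr + dimExpr * stride : dimExpr * stride;
  }
  return expr;
}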


@@ -99,11 +99,11 @@ func.func @view_empty_memref(%offset: index, %mem: memref<0xi8>) {
 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK: llvm.mlir.constant(4 : index) : i64
 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK: llvm.mlir.constant(0 : index) : i64
+// CHECK: llvm.mlir.constant(1 : index) : i64
 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK: llvm.mlir.constant(0 : index) : i64
 // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK: llvm.mlir.constant(0 : index) : i64
+// CHECK: llvm.mlir.constant(4 : index) : i64
 // CHECK: = llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   %0 = memref.view %mem[%offset][] : memref<0xi8> to memref<0x4xf32>
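With the early return gone, the view of memref<0xi8> as memref<0x4xf32> therefore lowers to a descriptor with sizes [0, 4] and strides [4, 1] rather than all-zero strides, which is exactly the difference the updated llvm.mlir.constant CHECK lines above capture.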