[mlir] Require std.alloc() ops to have canonical layout during LLVM lowering.

The current code allows strided layouts, but the number of elements to allocate is ambiguous. It could be either the number of elements in the shape (what the current implementation computes), or the number of elements required so that indexing through the given layout map never goes out of bounds (which would require evaluating the layout map).

If we require canonical layouts, the two are the same.
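
To make the ambiguity concrete, here is an illustrative sketch (function name hypothetical) built around the strided memref from the test this change removes; the element counts in the comments are hand-worked arithmetic, not compiler output:

    func @ambiguous_alloc(%ind: index) {
      // Shape-based count: 32 * 64 = 2048 elements.
      // Map-based count: the largest linearized index the layout can produce
      // is 32 + 31 * M + 63 = 95 + 31 * M, so 96 + 31 * M elements are needed
      // to keep all accesses in bounds. That exceeds 2048 once M > 62, and M
      // is only known at runtime (it is bound to %ind at the alloc site).
      %0 = alloc()[%ind] : memref<32x64xf32, affine_map<(i, j)[M] -> (32 + M * i + j)>>
      std.return
    }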

Reviewed By: nicolasvasilache, ftynse

Differential Revision: https://reviews.llvm.org/D91523
Christian Sigg, 2020-11-16 11:41:59 +01:00
parent 6c02835d6c
commit 04481f26fa
2 changed files with 1 addition and 21 deletions


@@ -1987,21 +1987,7 @@ private:
   LogicalResult match(Operation *op) const override {
     MemRefType memRefType = getMemRefResultType(op);
-    if (isSupportedMemRefType(memRefType))
-      return success();
-
-    int64_t offset;
-    SmallVector<int64_t, 4> strides;
-    if (failed(getStridesAndOffset(memRefType, strides, offset)))
-      return failure();
-
-    // Dynamic strides are ok if they can be deduced from dynamic sizes (which
-    // is guaranteed when getStridesAndOffset succeeded). A dynamic offset,
-    // however, can never be alloc'ed.
-    if (offset == MemRefType::getDynamicStrideOrOffset())
-      return failure();
-
-    return success();
+    return success(isSupportedMemRefType(memRefType));
   }
 
   // An `alloc` is converted into a definition of a memref descriptor value and
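
For reference, a minimal sketch (function name hypothetical) of what the simplified match now accepts and rejects, reusing the strided type from the removed test below:

    func @canonical_vs_strided(%M: index) {
      // Identity (canonical) layout: isSupportedMemRefType holds, so this
      // alloc keeps lowering to an LLVM allocation of 32 * 64 elements.
      %a = alloc() : memref<32x64xf32>
      // Strided layout: the pattern no longer matches; such a memref has to
      // be brought into canonical form before this lowering runs.
      %b = alloc()[%M] : memref<32x64xf32, affine_map<(i, j)[M] -> (32 + M * i + j)>>
      std.return
    }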


@@ -10,12 +10,6 @@ func @address_space(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>, 7>) {
   std.return
 }
 
-// CHECK-LABEL: func @strided_memref(
-func @strided_memref(%ind: index) {
-  %0 = alloc()[%ind] : memref<32x64xf32, affine_map<(i, j)[M] -> (32 + M * i + j)>>
-  std.return
-}
-
 // -----
 
 // CHECK-LABEL: func @rsqrt(