[mlir] vector.type_cast: disallow memrefs with layout in verifier

Summary:
These are not supported by any of the code using `type_cast`. In the general
case, such casting would require memrefs to handle a non-contiguous vector
representation or misaligned vectors (e.g., if the offset of the source memref
is not divisible by the vector size, since the offset in the target memref is
expressed as a number of vector-typed elements).
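
To make the offset argument concrete, here is a hedged sketch (the strided
type below is illustrative, not taken from the patch):

  // A source whose canonical strided form has offset 5 (in f32 elements):
  //   memref<4x3xf32, affine_map<(d0, d1) -> (d0 * 3 + d1 + 5)>>
  // One vector<4x3xf32> spans 12 contiguous f32s, so casting to
  //   memref<vector<4x3xf32>>
  // would require a target offset of 5/12 of a vector, which has no integer
  // representation; the verifier therefore rejects non-trivial layouts.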

Differential Revision: https://reviews.llvm.org/D76349
Author: Alex Zinenko
Date: 2020-03-18 10:59:54 +01:00
Commit: bc18624b40 (parent 4b0f1e12c2)
2 changed files with 11 additions and 0 deletions


@@ -1483,6 +1483,10 @@ static void print(OpAsmPrinter &p, TypeCastOp op) {
 }
 
 static LogicalResult verify(TypeCastOp op) {
+  MemRefType canonicalType = canonicalizeStridedLayout(op.getMemRefType());
+  if (!canonicalType.getAffineMaps().empty())
+    return op.emitOpError("expects operand to be a memref with no layout");
+
   auto resultType = inferVectorTypeCastResultType(op.getMemRefType());
   if (op.getResultMemRefType() != resultType)
     return op.emitOpError("expects result type to be: ") << resultType;


@@ -1046,3 +1046,10 @@ func @reduce_unsupported_rank(%arg0: vector<4x16xf32>) -> f32 {
   // expected-error@+1 {{'vector.reduction' op unsupported reduction rank: 2}}
   %0 = vector.reduction "add", %arg0 : vector<4x16xf32> into f32
 }
+
+// -----
+
+func @type_cast_layout(%arg0: memref<4x3xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + s2)>>) {
+  // expected-error@+1 {{expects operand to be a memref with no layout}}
+  %0 = vector.type_cast %arg0: memref<4x3xf32, affine_map<(d0, d1)[s0, s1, s2] -> (d0 * s0 + d1 * s1 + s2)>> to memref<vector<4x3xf32>>
+}
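
For contrast, a minimal sketch of the form the verifier still accepts: a
layout-free memref of the same shape (function name hypothetical):

  func @type_cast_no_layout(%arg0: memref<4x3xf32>) {
    %0 = vector.type_cast %arg0 : memref<4x3xf32> to memref<vector<4x3xf32>>
    return
  }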