Disallow zero dimensions in vectors and memrefs
Aggregate types where at least one dimension is zero do not fully make sense as they cannot contain any values (their total size is zero). However, TensorFlow and XLA support tensors with zero sizes, so we must support those too. This is relatively safe since, unlike vectors and memrefs, we don't have first-class element accessors for MLIR tensors.

To support sparse element attributes of vector types that have no non-zero elements, make sure that index and value element attributes have tensor type so that we never need to create a zero vector type internally. Note that this is already consistent with the inline documentation of the sparse elements attribute. Users of the sparse elements attribute should not rely on the storage schema anyway.

PiperOrigin-RevId: 232896707
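For illustration, drawn from the tests touched by this change (the tensor line and the @zero_tensor_type name are hypothetical additions here, since zero-sized tensors simply keep working):

    // Now rejected: vector types must have positive constant sizes.
    func @zero_vector_type() -> vector<0xi32>

    // Now rejected: invalid memref size.
    func @zero_memref_type() -> memref<0xi32>

    // Still accepted (hypothetical name): zero sizes remain legal in tensor types.
    func @zero_tensor_type() -> tensor<0xi32>

    // Still accepted: a sparse attribute of vector type with no nonzero
    // elements; its indices and values are stored internally as tensor-typed
    // dense elements attributes.
    "fooi32"(){bar: sparse<vector<1x1xi32>, [], []>} : () -> ()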
commit 36c0516c78
parent 99b19c1d20
@@ -678,7 +678,8 @@ of tensor type.
 Note: hexadecimal integer literals are not allowed in tensor type declarations
 to avoid confusion between `0xf32` and `0 x f32`. Zero sizes are allowed in
 tensors and treated as other sizes, e.g., `tensor<0 x 1 x i32>` and `tensor<1 x
-0 x i32>` are different types.
+0 x i32>` are different types. Since zero sizes are not allowed in other types,
+such tensors should be optimized away before lowering tensors to memrefs.
 
 Examples:
 
@@ -444,12 +444,13 @@ public:
 ///
 /// This class uses COO (coordinate list) encoding to represent the sparse
 /// elements in an element attribute. Specifically, the sparse vector/tensor
-/// stores the indices and values as two separate dense elements attributes. The
-/// dense elements attribute indices is a 2-D tensor of 64-bit integer elements
-/// with shape [N, ndims], which specifies the indices of the elements in the
-/// sparse tensor that contains nonzero values. The dense elements attribute
-/// values is a 1-D tensor with shape [N], and it supplies the corresponding
-/// values for the indices.
+/// stores the indices and values as two separate dense elements attributes of
+/// tensor type (even if the sparse attribute is of vector type, in order to
+/// support empty lists). The dense elements attribute indices is a 2-D tensor
+/// of 64-bit integer elements with shape [N, ndims], which specifies the
+/// indices of the elements in the sparse tensor that contains nonzero values.
+/// The dense elements attribute values is a 1-D tensor with shape [N], and it
+/// supplies the corresponding values for the indices.
 ///
 /// For example,
 /// `sparse<tensor<3x4xi32>, [[0, 0], [1, 2]], [1, 5]>` represents tensor
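To make the COO layout above concrete, here is a sketch of how the example from that comment decomposes (the i64 and i32 element types follow from the description above and from the sparse tensor's element type):

    sparse<tensor<3x4xi32>, [[0, 0], [1, 2]], [1, 5]>
    // N = 2 nonzero elements, ndims = 2, so:
    //   indices: tensor<2x2xi64> = [[0, 0], [1, 2]]
    //   values:  tensor<2xi32>   = [1, 5]
    // i.e. element (0, 0) is 1, element (1, 2) is 5, and all other
    // elements of the 3x4 tensor are 0.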
@@ -212,9 +212,10 @@ bool VectorType::verifyConstructionInvariants(llvm::Optional<Location> loc,
     return true;
   }
 
-  if (any_of(shape, [](int64_t i) { return i < 0; })) {
+  if (any_of(shape, [](int64_t i) { return i <= 0; })) {
     if (loc)
-      context->emitError(*loc, "vector types must have static shape");
+      context->emitError(*loc,
+                         "vector types must have positive constant sizes");
     return true;
   }
   return false;
@@ -257,6 +258,13 @@ RankedTensorType RankedTensorType::getChecked(ArrayRef<int64_t> shape,
 bool RankedTensorType::verifyConstructionInvariants(
     llvm::Optional<Location> loc, MLIRContext *context, ArrayRef<int64_t> shape,
     Type elementType) {
+  for (int64_t s : shape) {
+    if (s < -1) {
+      if (loc)
+        context->emitError(*loc, "invalid tensor dimension size");
+      return true;
+    }
+  }
   return checkTensorElementType(loc, context, elementType);
 }
 
@@ -298,6 +306,15 @@ MemRefType MemRefType::getImpl(ArrayRef<int64_t> shape, Type elementType,
                                Optional<Location> location) {
   auto *context = elementType.getContext();
 
+  for (int64_t s : shape) {
+    // Negative sizes are not allowed except for `-1` that means dynamic size.
+    if (s <= 0 && s != -1) {
+      if (location)
+        context->emitError(*location, "invalid memref size");
+      return {};
+    }
+  }
+
   // Check that the structure of the composition is valid, i.e. that each
   // subsequent affine map has as many inputs as the previous map has results.
   // Take the dimensionality of the MemRef for the first map.
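For reference, a sketch of what the new memref check accepts and rejects at the type level (dynamic sizes are written ? in the textual IR and carried as -1 internally; the error strings come from the tests below):

    memref<?xi32>    // still valid: -1 (dynamic) is explicitly allowed
    memref<2x4xi32>  // still valid: positive constant sizes
    memref<0xi32>    // now rejected: "invalid memref size"
    memref<1x0xi32>  // now rejected: "invalid memref size"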
@@ -207,7 +207,7 @@ public:
   ParseResult parseAffineMapOrIntegerSetReference(AffineMap &map,
                                                   IntegerSet &set);
   DenseElementsAttr parseDenseElementsAttr(VectorOrTensorType type);
-  DenseElementsAttr parseDenseElementsAttr(Type eltType, bool isVector);
+  DenseElementsAttr parseDenseElementsAttrAsTensor(Type eltType);
   VectorOrTensorType parseVectorOrTensorType();
 
   // Location Parsing.
@@ -1124,8 +1124,7 @@ Attribute Parser::parseAttribute(Type type) {
   case Token::l_square: {
     /// Parse indices
     auto indicesEltType = builder.getIntegerType(64);
-    auto indices =
-        parseDenseElementsAttr(indicesEltType, type.isa<VectorType>());
+    auto indices = parseDenseElementsAttrAsTensor(indicesEltType);
     if (!indices)
       return nullptr;
 
@@ -1134,8 +1133,7 @@ Attribute Parser::parseAttribute(Type type) {
 
     /// Parse values.
     auto valuesEltType = type.getElementType();
-    auto values =
-        parseDenseElementsAttr(valuesEltType, type.isa<VectorType>());
+    auto values = parseDenseElementsAttrAsTensor(valuesEltType);
     if (!values)
       return nullptr;
 
@@ -1188,19 +1186,14 @@ Attribute Parser::parseAttribute(Type type) {
/// | float-literal
/// | `[` (attribute-value (`,` attribute-value)*)? `]`
 ///
-/// This method returns a constructed dense elements attribute with the shape
-/// from the parsing result.
-DenseElementsAttr Parser::parseDenseElementsAttr(Type eltType, bool isVector) {
+/// This method returns a constructed dense elements attribute of tensor type
+/// with the shape from the parsing result.
+DenseElementsAttr Parser::parseDenseElementsAttrAsTensor(Type eltType) {
   TensorLiteralParser literalParser(*this, eltType);
   if (literalParser.parse())
     return nullptr;
 
-  VectorOrTensorType type;
-  if (isVector) {
-    type = builder.getVectorType(literalParser.getShape(), eltType);
-  } else {
-    type = builder.getTensorType(literalParser.getShape(), eltType);
-  }
+  auto type = builder.getTensorType(literalParser.getShape(), eltType);
   return builder.getDenseElementsAttr(type, literalParser.getValues())
       .cast<DenseElementsAttr>();
 }
@@ -837,3 +837,53 @@ func @missing_for_min(%arg0: index, %arg1: index, %arg2: memref<100xf32>) {
 }
 return
 }
+
+// -----
+
+// expected-error @+1 {{vector types must have positive constant sizes}}
+func @zero_vector_type() -> vector<0xi32>
+
+// -----
+
+// expected-error @+1 {{vector types must have positive constant sizes}}
+func @zero_in_vector_type() -> vector<1x0xi32>
+
+// -----
+
+// expected-error @+1 {{invalid memref size}}
+func @zero_memref_type() -> memref<0xi32>
+
+// -----
+
+// expected-error @+1 {{invalid memref size}}
+func @zero_in_memref_type() -> memref<1x0xi32>
+
+// -----
+
+// expected-error @+1 {{expected dimension size in vector type}}
+func @negative_vector_size() -> vector<-1xi32>
+
+// -----
+
+// expected-error @+1 {{expected non-function type}}
+func @negative_in_vector_size() -> vector<1x-1xi32>
+
+// -----
+
+// expected-error @+1 {{expected non-function type}}
+func @negative_memref_size() -> memref<-1xi32>
+
+// -----
+
+// expected-error @+1 {{expected non-function type}}
+func @negative_in_memref_size() -> memref<1x-1xi32>
+
+// -----
+
+// expected-error @+1 {{expected non-function type}}
+func @negative_tensor_size() -> tensor<-1xi32>
+
+// -----
+
+// expected-error @+1 {{expected non-function type}}
+func @negative_in_tensor_size() -> tensor<1x-1xi32>
@@ -670,19 +670,11 @@ func @densevectorattr() -> () {
 // CHECK: "fooi64"() {bar: dense<vector<1x1x1xi64>, {{\[\[\[}}-5]]]>} : () -> ()
 "fooi64"(){bar: dense<vector<1x1x1xi64>, [[[-5]]]>} : () -> ()
 
-// CHECK: "foo2"() {bar: dense<vector<0xi32>, []>} : () -> ()
-"foo2"(){bar: dense<vector<0xi32>, []>} : () -> ()
-// CHECK: "foo2"() {bar: dense<vector<1x0xi32>, {{\[\[}}]]>} : () -> ()
-"foo2"(){bar: dense<vector<1x0xi32>, [[]]>} : () -> ()
 // CHECK: "foo3"() {bar: dense<vector<2x1x4xi32>, {{\[\[\[}}5, -6, 1, 2]], {{\[\[}}7, 8, 3, 4]]]>} : () -> ()
 "foo3"(){bar: dense<vector<2x1x4xi32>, [[[5, -6, 1, 2]], [[7, 8, 3, 4]]]>} : () -> ()
 
 // CHECK: "float1"() {bar: dense<vector<1x1x1xf32>, {{\[\[\[}}5.000000e+00]]]>} : () -> ()
 "float1"(){bar: dense<vector<1x1x1xf32>, [[[5.0]]]>} : () -> ()
-// CHECK: "float2"() {bar: dense<vector<0xf32>, []>} : () -> ()
-"float2"(){bar: dense<vector<0xf32>, []>} : () -> ()
-// CHECK: "float2"() {bar: dense<vector<1x0xf32>, {{\[\[}}]]>} : () -> ()
-"float2"(){bar: dense<vector<1x0xf32>, [[]]>} : () -> ()
 
 // CHECK: "bfloat16"() {bar: dense<vector<2x1x4xbf16>, {{\[\[\[}}-5.000000e+00, 6.000000e+00, 1.000000e+00, 2.000000e+00]], {{\[\[}}7.000000e+00, -8.000000e+00, 3.000000e+00, 4.000000e+00]]]>} : () -> ()
 "bfloat16"(){bar: dense<vector<2x1x4xbf16>, [[[-5.0, 6.0, 1.0, 2.0]], [[7.0, -8.0, 3.0, 4.0]]]>} : () -> ()
@@ -735,19 +727,13 @@ func @sparsevectorattr() -> () {
 "fooi32"(){bar: sparse<vector<1x1xi32>, [], []>} : () -> ()
 // CHECK: "fooi64"() {bar: sparse<vector<1xi64>, {{\[\[}}0]], {{\[}}-1]>} : () -> ()
 "fooi64"(){bar: sparse<vector<1xi64>, [[0]], [-1]>} : () -> ()
-// CHECK: "foo2"() {bar: sparse<vector<0xi32>, {{\[}}], {{\[}}]>} : () -> ()
-"foo2"(){bar: sparse<vector<0xi32>, [], []>} : () -> ()
 
 // CHECK: "foof16"() {bar: sparse<vector<1x1x1xf16>, {{\[\[}}0, 0, 0]], {{\[}}-2.000000e+00]>} : () -> ()
 "foof16"(){bar: sparse<vector<1x1x1xf16>, [[0, 0, 0]], [-2.0]>} : () -> ()
 // CHECK: "foobf16"() {bar: sparse<vector<2x2x2xbf16>, {{\[\[}}1, 1, 0], {{\[}}0, 1, 0], {{\[}}0, 0, 1]], {{\[}}2.000000e+00, -1.000000e+00, 5.000000e+00]>} : () -> ()
 "foobf16"(){bar: sparse<vector<2x2x2xbf16>, [[1, 1, 0], [0, 1, 0], [0, 0, 1]], [2.0, -1.0, 5.0]>} : () -> ()
-// CHECK: "foof32"() {bar: sparse<vector<1x0x1xf32>, {{\[}}], {{\[}}]>} : () -> ()
-"foof32"(){bar: sparse<vector<1x0x1xf32>, [], []>} : () -> ()
 // CHECK: "foof64"() {bar: sparse<vector<1xf64>, {{\[\[}}0]], {{\[}}-1.000000e+00]>} : () -> ()
 "foof64"(){bar: sparse<vector<1xf64>, [[0]], [-1.0]>} : () -> ()
-// CHECK: "foof320"() {bar: sparse<vector<0xf32>, {{\[}}], {{\[}}]>} : () -> ()
-"foof320"(){bar: sparse<vector<0xf32>, [], []>} : () -> ()
 return
 }
 