[mlir][sparse] emergency fix after constant -> arith.constant change
Reviewed By: Mogball
Differential Revision: https://reviews.llvm.org/D111743
commit a652e5b53a
parent 92bec0e970
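The rename behind this fix, illustrated on a single index constant (a minimal sketch distilled from the hunks below, not an excerpt from the patched files):

  // Old spelling: the constant op lived in the standard dialect.
  %c10 = constant 10 : index
  // New spelling after the dialect split: the op now lives in the arith dialect.
  %c10 = arith.constant 10 : index

On the C++ side, the matching op class is accordingly arith::ConstantOp, and its value attribute is read with value() rather than getValue(), as the first hunk shows.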
@@ -225,9 +225,9 @@ static LogicalResult verify(InitOp op) {
   for (unsigned i = 0; i < rank; i++) {
     if (shape[i] == ShapedType::kDynamicSize)
       continue;
-    auto constantOp = op.sizes()[i].getDefiningOp<ConstantOp>();
+    auto constantOp = op.sizes()[i].getDefiningOp<arith::ConstantOp>();
     if (!constantOp ||
-        constantOp.getValue().cast<IntegerAttr>().getInt() != shape[i])
+        constantOp.value().cast<IntegerAttr>().getInt() != shape[i])
       return op.emitError("unexpected mismatch with static dimension size ")
              << shape[i];
   }
@@ -37,8 +37,8 @@ func @invalid_init_rank(%arg0: index) -> tensor<?xf32, #SparseVector> {
 #SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
 
 func @invalid_init_size() -> tensor<?x10xf32, #SparseMatrix> {
-  %c10 = constant 10 : index
-  %c20 = constant 20 : index
+  %c10 = arith.constant 10 : index
+  %c20 = arith.constant 20 : index
   // expected-error@+1 {{unexpected mismatch with static dimension size 10}}
   %0 = sparse_tensor.init [%c10, %c20] : tensor<?x10xf32, #SparseMatrix>
   return %0 : tensor<?x10xf32, #SparseMatrix>
@@ -16,13 +16,13 @@ func @sparse_new(%arg0: !llvm.ptr<i8>) -> tensor<128xf64, #SparseVector> {
 #SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>
 
 // CHECK-LABEL: func @sparse_init()
-// CHECK-DAG: %[[C16:.*]] = constant 16 : index
-// CHECK-DAG: %[[C32:.*]] = constant 32 : index
+// CHECK-DAG: %[[C16:.*]] = arith.constant 16 : index
+// CHECK-DAG: %[[C32:.*]] = arith.constant 32 : index
 // CHECK: %[[T:.*]] = sparse_tensor.init[%[[C16]], %[[C32]]] : tensor<?x32xf64, #{{.*}}>
 // CHECK: return %[[T]] : tensor<?x32xf64, #{{.*}}>
 func @sparse_init() -> tensor<?x32xf64, #SparseMatrix> {
-  %d1 = constant 16 : index
-  %d2 = constant 32 : index
+  %d1 = arith.constant 16 : index
+  %d2 = arith.constant 32 : index
   %0 = sparse_tensor.init [%d1, %d2] : tensor<?x32xf64, #SparseMatrix>
   return %0 : tensor<?x32xf64, #SparseMatrix>
 }
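For completeness, a standalone example in the post-change syntax that the updated verifier accepts (a sketch assuming only the sparse_tensor and arith dialects at this revision; the function name and shape are made up for illustration):

#SparseMatrix = #sparse_tensor.encoding<{dimLevelType = ["compressed", "compressed"]}>

// Both constants match the static dimension sizes, so verification succeeds.
func @sparse_init_static() -> tensor<16x32xf64, #SparseMatrix> {
  %d1 = arith.constant 16 : index
  %d2 = arith.constant 32 : index
  %0 = sparse_tensor.init [%d1, %d2] : tensor<16x32xf64, #SparseMatrix>
  return %0 : tensor<16x32xf64, #SparseMatrix>
}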