[mlir][sparse][nfc] Use tensor.generate in sparse integration tests

Currently, dense tensors are initialized in the sparse integration tests using "bufferization.alloc_tensor and scf.for". This makes the code harder to read and maintain. This diff uses tensor.generate instead to initialize the dense tensors.
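
As an illustration of the pattern this change applies, here is a minimal before/after sketch (the function and value names below are invented for this example, not taken from any one test): the old style allocates a destination with bufferization.alloc_tensor and fills it element by element in an scf.for loop that threads the tensor through iter_args, while tensor.generate produces the same tensor from a region that yields one element per index.

    // Minimal sketch with hypothetical names: build a dense vector whose
    // i-th element is i+1, first the old way, then with tensor.generate.
    func.func @init_example(%n: index) -> (tensor<?xi32>, tensor<?xi32>) {
      %c0 = arith.constant 0 : index
      %c1 = arith.constant 1 : index
      // Old style: allocate a destination tensor and insert one element per
      // iteration, threading the updated tensor through iter_args.
      %init = bufferization.alloc_tensor(%n) : tensor<?xi32>
      %old = scf.for %i = %c0 to %n step %c1 iter_args(%t = %init) -> tensor<?xi32> {
        %ip1 = arith.addi %i, %c1 : index
        %v = arith.index_cast %ip1 : index to i32
        %t2 = tensor.insert %v into %t[%i] : tensor<?xi32>
        scf.yield %t2 : tensor<?xi32>
      }
      // New style: tensor.generate takes the dynamic extent(s); its region is
      // evaluated at every index and yields that element's value.
      %new = tensor.generate %n {
      ^bb0(%i: index):
        %ip1 = arith.addi %i, %c1 : index
        %v = arith.index_cast %ip1 : index to i32
        tensor.yield %v : i32
      } : tensor<?xi32>
      return %old, %new : tensor<?xi32>, tensor<?xi32>
    }

Both forms produce the same tensor value; tensor.generate simply states the element rule directly instead of through destination-passing updates.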

Testing: Ran the integration tests after building with the -DLLVM_USE_SANITIZER=Address flag.

Reviewed By: springerm

Differential Revision: https://reviews.llvm.org/D131404
Author: Rajas Vanjape, 2022-08-08 15:23:36 +00:00
parent c1f65df19c
commit 333f98b4b6
4 changed files with 61 additions and 81 deletions

@@ -75,18 +75,17 @@ module {
     %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xi32, #SparseMatrix>
     // Initialize dense vectors.
-    %init_256 = bufferization.alloc_tensor(%c256) : tensor<?xi32>
-    %b = scf.for %i = %c0 to %c256 step %c1 iter_args(%t = %init_256) -> tensor<?xi32> {
+    %b = tensor.generate %c256 {
+    ^bb0(%i : index):
       %k = arith.addi %i, %c1 : index
       %j = arith.index_cast %k : index to i32
-      %t2 = tensor.insert %j into %t[%i] : tensor<?xi32>
-      scf.yield %t2 : tensor<?xi32>
-    }
-    %init_4 = bufferization.alloc_tensor(%c4) : tensor<?xi32>
-    %x = scf.for %i = %c0 to %c4 step %c1 iter_args(%t = %init_4) -> tensor<?xi32> {
-      %t2 = tensor.insert %i0 into %t[%i] : tensor<?xi32>
-      scf.yield %t2 : tensor<?xi32>
-    }
+      tensor.yield %j : i32
+    } : tensor<?xi32>
+    %x = tensor.generate %c4 {
+    ^bb0(%i : index):
+      tensor.yield %i0 : i32
+    } : tensor<?xi32>
     // Call kernel.
     %0 = call @kernel_matvec(%a, %b, %x)

@@ -82,42 +82,30 @@ module {
     %lsz = tensor.dim %b, %cst2 : tensor<?x?x?xf64, #SparseTensor>
     // Initialize dense input matrix C.
-    %c0 = bufferization.alloc_tensor(%ksz, %jsz) : tensor<?x?xf64>
-    %c = scf.for %k = %cst0 to %ksz step %cst1 iter_args(%c1 = %c0) -> tensor<?x?xf64> {
-      %c2 = scf.for %j = %cst0 to %jsz step %cst1 iter_args(%c3 = %c1) -> tensor<?x?xf64> {
-        %k0 = arith.muli %k, %jsz : index
-        %k1 = arith.addi %k0, %j : index
-        %k2 = arith.index_cast %k1 : index to i32
-        %kf = arith.sitofp %k2 : i32 to f64
-        %c4 = tensor.insert %kf into %c3[%k, %j] : tensor<?x?xf64>
-        scf.yield %c4 : tensor<?x?xf64>
-      }
-      scf.yield %c2 : tensor<?x?xf64>
-    }
+    %c = tensor.generate %ksz, %jsz {
+    ^bb0(%k : index, %j : index):
+      %k0 = arith.muli %k, %jsz : index
+      %k1 = arith.addi %k0, %j : index
+      %k2 = arith.index_cast %k1 : index to i32
+      %kf = arith.sitofp %k2 : i32 to f64
+      tensor.yield %kf : f64
+    } : tensor<?x?xf64>
     // Initialize dense input matrix D.
-    %d0 = bufferization.alloc_tensor(%lsz, %jsz) : tensor<?x?xf64>
-    %d = scf.for %l = %cst0 to %lsz step %cst1 iter_args(%d1 = %d0) -> tensor<?x?xf64> {
-      %d2 = scf.for %j = %cst0 to %jsz step %cst1 iter_args(%d3 = %d1) -> tensor<?x?xf64> {
-        %k0 = arith.muli %l, %jsz : index
-        %k1 = arith.addi %k0, %j : index
-        %k2 = arith.index_cast %k1 : index to i32
-        %kf = arith.sitofp %k2 : i32 to f64
-        %d4 = tensor.insert %kf into %d3[%l, %j] : tensor<?x?xf64>
-        scf.yield %d4 : tensor<?x?xf64>
-      }
-      scf.yield %d2 : tensor<?x?xf64>
-    }
+    %d = tensor.generate %lsz, %jsz {
+    ^bb0(%l : index, %j : index):
+      %k0 = arith.muli %l, %jsz : index
+      %k1 = arith.addi %k0, %j : index
+      %k2 = arith.index_cast %k1 : index to i32
+      %kf = arith.sitofp %k2 : i32 to f64
+      tensor.yield %kf : f64
+    } : tensor<?x?xf64>
     // Initialize dense output matrix A.
-    %a0 = bufferization.alloc_tensor(%isz, %jsz) : tensor<?x?xf64>
-    %a = scf.for %i = %cst0 to %isz step %cst1 iter_args(%a1 = %a0) -> tensor<?x?xf64> {
-      %a2 = scf.for %j = %cst0 to %jsz step %cst1 iter_args(%a3 = %a1) -> tensor<?x?xf64> {
-        %a4 = tensor.insert %f0 into %a3[%i, %j] : tensor<?x?xf64>
-        scf.yield %a4 : tensor<?x?xf64>
-      }
-      scf.yield %a2 : tensor<?x?xf64>
-    }
+    %a = tensor.generate %isz, %jsz {
+    ^bb0(%i : index, %j: index):
+      tensor.yield %f0 : f64
+    } : tensor<?x?xf64>
     // Call kernel.
     %0 = call @kernel_mttkrp(%b, %c, %d, %a)

@@ -72,27 +72,27 @@ module {
     %c5 = arith.constant 5 : index
     %c10 = arith.constant 10 : index
-    // Setup memory for the dense matrices and initialize.
-    %a0 = bufferization.alloc_tensor(%c5, %c10) : tensor<?x?xf32>
-    %b0 = bufferization.alloc_tensor(%c10, %c5) : tensor<?x?xf32>
-    %x0 = bufferization.alloc_tensor(%c5, %c5) : tensor<?x?xf32>
-    %a, %b, %x = scf.for %i = %c0 to %c5 step %c1 iter_args(%a1 = %a0, %b1 = %b0, %x1 = %x0)
-        -> (tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>) {
-      %x2 = scf.for %j = %c0 to %c5 step %c1 iter_args(%x3 = %x1) -> (tensor<?x?xf32>) {
-        %x4 = tensor.insert %d0 into %x3[%i, %j] : tensor<?x?xf32>
-        scf.yield %x4 : tensor<?x?xf32>
-      }
+    // Initialize dense matrices.
+    %x = tensor.generate %c5, %c5 {
+    ^bb0(%i : index, %j : index):
+      tensor.yield %d0 : f32
+    } : tensor<?x?xf32>
+    %a = tensor.generate %c5, %c10 {
+    ^bb0(%i: index, %j: index):
       %p = arith.addi %i, %c1 : index
       %q = arith.index_cast %p : index to i32
       %d = arith.sitofp %q : i32 to f32
-      %a2, %b2 = scf.for %j = %c0 to %c10 step %c1 iter_args(%a3 = %a1, %b3 = %b1)
-          -> (tensor<?x?xf32>, tensor<?x?xf32>) {
-        %a4 = tensor.insert %d into %a3[%i, %j] : tensor<?x?xf32>
-        %b4 = tensor.insert %d into %b3[%j, %i] : tensor<?x?xf32>
-        scf.yield %a4, %b4 : tensor<?x?xf32>, tensor<?x?xf32>
-      }
-      scf.yield %a2, %b2, %x2 : tensor<?x?xf32>, tensor<?x?xf32>, tensor<?x?xf32>
-    }
+      tensor.yield %d : f32
+    } : tensor<?x?xf32>
+    %b = tensor.generate %c10, %c5 {
+    ^bb0(%i: index, %j: index):
+      %p = arith.addi %j, %c1 : index
+      %q = arith.index_cast %p : index to i32
+      %d = arith.sitofp %q : i32 to f32
+      tensor.yield %d : f32
+    } : tensor<?x?xf32>
     // Read the sparse matrix from file, construct sparse storage.
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
@@ -70,27 +70,20 @@ module {
     %fileName = call @getTensorFilename(%c0) : (index) -> (!Filename)
     %a = sparse_tensor.new %fileName : !Filename to tensor<?x?xf64, #SparseMatrix>
-    // Initialize dense vectors.
-    %init_256_4 = bufferization.alloc_tensor(%c256, %c4) : tensor<?x?xf64>
-    %b = scf.for %i = %c0 to %c256 step %c1 iter_args(%t = %init_256_4) -> tensor<?x?xf64> {
-      %b2 = scf.for %j = %c0 to %c4 step %c1 iter_args(%t2 = %t) -> tensor<?x?xf64> {
-        %k0 = arith.muli %i, %c4 : index
-        %k1 = arith.addi %j, %k0 : index
-        %k2 = arith.index_cast %k1 : index to i32
-        %k = arith.sitofp %k2 : i32 to f64
-        %t3 = tensor.insert %k into %t2[%i, %j] : tensor<?x?xf64>
-        scf.yield %t3 : tensor<?x?xf64>
-      }
-      scf.yield %b2 : tensor<?x?xf64>
-    }
-    %init_4_4 = bufferization.alloc_tensor(%c4, %c4) : tensor<?x?xf64>
-    %x = scf.for %i = %c0 to %c4 step %c1 iter_args(%t = %init_4_4) -> tensor<?x?xf64> {
-      %x2 = scf.for %j = %c0 to %c4 step %c1 iter_args(%t2 = %t) -> tensor<?x?xf64> {
-        %t3 = tensor.insert %i0 into %t2[%i, %j] : tensor<?x?xf64>
-        scf.yield %t3 : tensor<?x?xf64>
-      }
-      scf.yield %x2 : tensor<?x?xf64>
-    }
+    // Initialize dense tensors.
+    %b = tensor.generate %c256, %c4 {
+    ^bb0(%i : index, %j : index):
+      %k0 = arith.muli %i, %c4 : index
+      %k1 = arith.addi %j, %k0 : index
+      %k2 = arith.index_cast %k1 : index to i32
+      %k = arith.sitofp %k2 : i32 to f64
+      tensor.yield %k : f64
+    } : tensor<?x?xf64>
+    %x = tensor.generate %c4, %c4 {
+    ^bb0(%i : index, %j : index):
+      tensor.yield %i0 : f64
+    } : tensor<?x?xf64>
     // Call kernel.
     %0 = call @kernel_spmm(%a, %b, %x)