// RUN: mlir-opt %s -convert-linalg-to-llvm | mlir-cpu-runner -e dot -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s

// RUN: mlir-opt %s -convert-linalg-to-loops -convert-linalg-to-llvm | mlir-cpu-runner -e dot -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s

// RUN: mlir-opt %s -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s

// RUN: mlir-opt %s -convert-linalg-to-loops -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s

// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" -linalg-promote-subviews -convert-linalg-to-loops -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s

// RUN: mlir-opt %s -linalg-tile="linalg-tile-sizes=2,3,4" -linalg-promote-subviews -convert-linalg-to-llvm | mlir-cpu-runner -e matmul -entry-point-result=f32 -shared-libs=%linalg_test_lib_dir/libcblas%shlibext,%linalg_test_lib_dir/libcblas_interface%shlibext | FileCheck %s
// Layout map for a contiguous 1-D strided memref (identity layout).
#strided1D = affine_map<(d0) -> (d0)>
// Layout map for a 2-D strided memref: dynamic row stride s0, unit column
// stride.
#strided2D = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
// Creates and returns a 1-D buffer of size %s filled with the value %f.
// Allocates a raw i8 buffer holding %s f32 elements (4 bytes each), fills it
// with %f through a typed view, and returns the raw buffer. Callers are
// responsible for deallocating the returned buffer.
func @alloc_filled_f32(%s : index, %f : f32) -> memref<?xi8> {
  // Byte size = element count * sizeof(f32).
  %c4 = constant 4 : index
  %s4 = muli %s, %c4: index
  // 256-byte alignment so lowered BLAS calls receive aligned data.
  %buf = alloc(%s4) {alignment = 256} : memref<?xi8>
  // Reinterpret the raw bytes as a 1-D f32 memref of %s elements and fill it.
  %V = view %buf[%s][] : memref<?xi8> to memref<?xf32, #strided1D>
  linalg.fill(%V, %f) : memref<?xf32, #strided1D>, f32
  return %buf : memref<?xi8>
}
// Test for linalg.dot.
// Computes the dot product of two 16-element vectors (all 2.0 and all 1.0)
// accumulated into a scalar initialized to 10.0, and returns the scalar:
// 16 * (2.0 * 1.0) + 10.0 = 42.0, matching the CHECK line at the bottom.
func @dot() -> f32 {
  %c1 = constant 1 : index
  %c16 = constant 16 : index
  %f10 = constant 10.00000e+00 : f32
  %f1 = constant 1.00000e+00 : f32
  %f2 = constant 2.00000e+00 : f32

  // Raw buffers: A = 16 x 2.0, B = 16 x 1.0, C = scalar 10.0.
  %bA = call @alloc_filled_f32(%c16, %f2) : (index, f32) -> (memref<?xi8>)
  %bB = call @alloc_filled_f32(%c16, %f1) : (index, f32) -> (memref<?xi8>)
  %bC = call @alloc_filled_f32(%c1, %f10) : (index, f32) -> (memref<?xi8>)

  // Typed views over the raw byte buffers.
  %A = view %bA[%c16][] : memref<?xi8> to memref<?xf32, #strided1D>
  %B = view %bB[%c16][] : memref<?xi8> to memref<?xf32, #strided1D>
  %C = view %bC[][] : memref<?xi8> to memref<f32>

  linalg.dot(%A, %B, %C) : memref<?xf32, #strided1D>, memref<?xf32, #strided1D>, memref<f32>
  %res = load %C[] : memref<f32>

  dealloc %bC : memref<?xi8>
  dealloc %bB : memref<?xi8>
  dealloc %bA : memref<?xi8>

  return %res : f32
}
// Test for linalg.matmul.
// Computes C += A * B where A is 10x16 (all 2.0), B is 16x10 (all 1.0) and C
// is 10x10 initialized to 10.0, then returns C[6, 7]. Every element of C ends
// up as 16 * (2.0 * 1.0) + 10.0 = 42.0, matching the CHECK line at the bottom.
func @matmul() -> f32 {
  %c6 = constant 6 : index
  %c7 = constant 7 : index
  %c10 = constant 10 : index
  %c16 = constant 16 : index
  %c100 = constant 100 : index
  %c160 = constant 160 : index
  %f1 = constant 1.00000e+00 : f32
  %f2 = constant 2.00000e+00 : f32
  %f10 = constant 10.00000e+00 : f32

  // Raw buffers: A = 160 x 2.0, B = 160 x 1.0, C = 100 x 10.0.
  %bA = call @alloc_filled_f32(%c160, %f2) : (index, f32) -> (memref<?xi8>)
  %bB = call @alloc_filled_f32(%c160, %f1) : (index, f32) -> (memref<?xi8>)
  %bC = call @alloc_filled_f32(%c100, %f10) : (index, f32) -> (memref<?xi8>)

  // 2-D typed views: A is 10x16, B is 16x10, C is 10x10.
  %A = view %bA[][%c10, %c16] : memref<?xi8> to memref<?x?xf32, #strided2D>
  %B = view %bB[][%c16, %c10] : memref<?xi8> to memref<?x?xf32, #strided2D>
  %C = view %bC[][%c10, %c10] : memref<?xi8> to memref<?x?xf32, #strided2D>

  linalg.matmul(%A, %B, %C) : memref<?x?xf32, #strided2D>, memref<?x?xf32, #strided2D>, memref<?x?xf32, #strided2D>
  // Any element would do; sample an interior one.
  %res = load %C[%c6, %c7] : memref<?x?xf32, #strided2D>

  dealloc %bC : memref<?xi8>
  dealloc %bB : memref<?xi8>
  dealloc %bA : memref<?xi8>

  return %res : f32
}
// All tests return this value.
// CHECK: 4.2{{0+}}e+01