add_subdirectory(CAPI)
add_subdirectory(lib)

if(MLIR_ENABLE_BINDINGS_PYTHON)
  add_subdirectory(python)
endif()
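
# lit.site.cfg.py is a Python file, so the boolean options below are
# normalized to 0/1 before being substituted into it.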
llvm_canonicalize_cmake_booleans(
  MLIR_ENABLE_BINDINGS_PYTHON
  LLVM_BUILD_EXAMPLES
  MLIR_ENABLE_CUDA_CONVERSIONS
  MLIR_ENABLE_CUDA_RUNNER
  MLIR_ENABLE_ROCM_CONVERSIONS
  MLIR_ENABLE_ROCM_RUNNER
  MLIR_ENABLE_SPIRV_CPU_RUNNER
  MLIR_ENABLE_VULKAN_RUNNER
  )

# Passed to lit.site.cfg.py so that the out-of-tree Standalone dialect test
# can find MLIR's CMake configuration.
set(MLIR_CMAKE_DIR
  "${CMAKE_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX}/cmake/mlir")

# Passed to lit.site.cfg.py.in to set up the path where the libraries for the
# linalg integration tests can be found.
set(MLIR_DIALECT_LINALG_INTEGRATION_TEST_LIB_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
set(MLIR_RUNNER_UTILS_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})

# Passed to lit.site.cfg.py.in to set up the path where the libraries for the
# mlir spirv / vulkan runner tests can be found.
set(MLIR_SPIRV_WRAPPER_LIBRARY_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
set(MLIR_VULKAN_WRAPPER_LIBRARY_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
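
# Extra options controlling the hardware-specific integration tests; they only
# take effect when MLIR_INCLUDE_INTEGRATION_TESTS is enabled.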
if (MLIR_INCLUDE_INTEGRATION_TESTS)
  set(INTEL_SDE_EXECUTABLE "" CACHE STRING
      "If set, arch-specific integration tests are run with Intel SDE.")
  option(MLIR_RUN_AMX_TESTS "Run AMX tests.")
  option(MLIR_RUN_X86VECTOR_TESTS "Run X86Vector tests.")
  option(MLIR_RUN_CUDA_TENSOR_CORE_TESTS "Run CUDA Tensor core WMMA tests.")
  # Passed to lit.site.cfg.py.in to set up the path where the libraries can be found.
  set(MLIR_INTEGRATION_TEST_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})

  # Copy test data over.
  file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/mttkrp_b.tns
            ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/test.mtx
            ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/test.tns
            ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/wide.mtx
            ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/zero.mtx
            DESTINATION ${MLIR_INTEGRATION_TEST_DIR}/data/)
endif()
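
# Generate lit.site.cfg.py for the main and unit test suites from their
# .py.in templates, substituting the CMake values configured above.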
configure_lit_site_cfg(
  ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in
  ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py
  MAIN_CONFIG
  ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py
  )
configure_lit_site_cfg(
  ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.py.in
  ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg.py
  MAIN_CONFIG
  ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.cfg.py
  )
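
# Tools and runtime libraries that the tests use; they must be built before
# check-mlir runs.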
set(MLIR_TEST_DEPENDS
  FileCheck count not
  mlir-capi-execution-engine-test
  mlir-capi-ir-test
  mlir-capi-llvm-test
  mlir-capi-pass-test
  mlir-capi-sparse-tensor-test
  mlir-cpu-runner
  mlir-linalg-ods-gen
  mlir-linalg-ods-yaml-gen
  mlir-lsp-server
  mlir-opt
  mlir-reduce
  mlir-tblgen
  mlir-translate
  mlir_runner_utils
  mlir_c_runner_utils
  mlir_async_runtime
  )
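
# When the integration tests are enabled, lli is also required (some of them
# execute the generated LLVM IR through it).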
if (MLIR_INCLUDE_INTEGRATION_TESTS)
  list(APPEND MLIR_TEST_DEPENDS lli)
endif()
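
# The CUDA / ROCm runner tests additionally need their matching runtime
# wrapper libraries.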
if(MLIR_ENABLE_CUDA_RUNNER)
  list(APPEND MLIR_TEST_DEPENDS mlir_cuda_runtime)
endif()

if(MLIR_ENABLE_ROCM_RUNNER)
  list(APPEND MLIR_TEST_DEPENDS mlir_rocm_runtime)
endif()

list(APPEND MLIR_TEST_DEPENDS MLIRUnitTests)
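
# The Toy tutorial chapters are only built when LLVM_BUILD_EXAMPLES is on, so
# only depend on them in that configuration.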
if(LLVM_BUILD_EXAMPLES)
  list(APPEND MLIR_TEST_DEPENDS
    toyc-ch1
    toyc-ch2
    toyc-ch3
    toyc-ch4
    toyc-ch5
    toyc-ch6
    toyc-ch7
    )
endif()
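
# The SPIR-V CPU runner executes GPU kernels on the host via GPU -> SPIR-V ->
# LLVM conversions; its tests need the runner binary and its C wrapper library.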
if(MLIR_ENABLE_SPIRV_CPU_RUNNER)
  add_subdirectory(mlir-spirv-cpu-runner)
  list(APPEND MLIR_TEST_DEPENDS
    mlir-spirv-cpu-runner
    mlir_test_spirv_cpu_runner_c_wrappers
  )
endif()

if(MLIR_ENABLE_VULKAN_RUNNER)
  list(APPEND MLIR_TEST_DEPENDS
    mlir-vulkan-runner
  )
endif()
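
# The Python binding tests need the native extension modules and the generated
# Python sources listed below.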
if(MLIR_ENABLE_BINDINGS_PYTHON)
  list(APPEND MLIR_TEST_DEPENDS
    MLIRBindingsPythonExtension
    MLIRBindingsPythonSources
    MLIRBindingsPythonTestOps
    MLIRTransformsBindingsPythonExtension
    MLIRConversionsBindingsPythonExtension
  )
endif()
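
# The umbrella check-mlir target builds everything in MLIR_TEST_DEPENDS and
# then runs lit over this directory.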
add_lit_testsuite(check-mlir "Running the MLIR regression tests"
  ${CMAKE_CURRENT_BINARY_DIR}
  DEPENDS ${MLIR_TEST_DEPENDS}
  )
set_target_properties(check-mlir PROPERTIES FOLDER "Tests")
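
# Additionally expose finer-grained lit targets for running subsets of the
# test suite.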
add_lit_testsuites(MLIR ${CMAKE_CURRENT_SOURCE_DIR}
  DEPENDS ${MLIR_TEST_DEPENDS}
)