llvm-project/mlir/test/CMakeLists.txt

add_subdirectory(CAPI)
add_subdirectory(lib)
if(MLIR_ENABLE_BINDINGS_PYTHON)
  add_subdirectory(python)
endif()
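# Canonicalize the boolean options below so they expand to Python-friendly
# values in the generated lit configuration files.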
llvm_canonicalize_cmake_booleans(
  MLIR_ENABLE_BINDINGS_PYTHON
  LLVM_BUILD_EXAMPLES
  MLIR_ENABLE_CUDA_CONVERSIONS
  MLIR_ENABLE_CUDA_RUNNER
  MLIR_ENABLE_ROCM_CONVERSIONS
  MLIR_ENABLE_ROCM_RUNNER
  MLIR_ENABLE_SPIRV_CPU_RUNNER
  MLIR_ENABLE_VULKAN_RUNNER
  )
# Passed to lit.site.cfg.py so that the out-of-tree Standalone dialect test
# can find MLIR's CMake configuration.
set(MLIR_CMAKE_DIR
  "${CMAKE_BINARY_DIR}/lib${LLVM_LIBDIR_SUFFIX}/cmake/mlir")
# Passed to lit.site.cfg.py.in to set up the path where the libraries
# for the linalg integration tests can be found.
set(MLIR_DIALECT_LINALG_INTEGRATION_TEST_LIB_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
set(MLIR_RUNNER_UTILS_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
# Passed to lit.site.cfg.py.in to set up the path where the libraries
# for the mlir spirv / vulkan runner tests can be found.
set(MLIR_SPIRV_WRAPPER_LIBRARY_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
set(MLIR_VULKAN_WRAPPER_LIBRARY_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
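# Additional knobs for the target-specific integration tests (Intel SDE, AMX,
# X86Vector, CUDA tensor cores), plus the test data they rely on.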
if (MLIR_INCLUDE_INTEGRATION_TESTS)
  set(INTEL_SDE_EXECUTABLE "" CACHE STRING
      "If set, arch-specific integration tests are run with Intel SDE.")
  option(MLIR_RUN_AMX_TESTS "Run AMX tests.")
  option(MLIR_RUN_X86VECTOR_TESTS "Run X86Vector tests.")
  option(MLIR_RUN_CUDA_TENSOR_CORE_TESTS "Run CUDA Tensor core WMMA tests.")
  # Passed to lit.site.cfg.py.in to set up the path where the test libraries
  # can be found.
  set(MLIR_INTEGRATION_TEST_DIR ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
  # Copy test data over.
  file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/mttkrp_b.tns
            ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/test.mtx
            ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/test.tns
            ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/wide.mtx
            ${CMAKE_CURRENT_SOURCE_DIR}/Integration/data/zero.mtx
       DESTINATION ${MLIR_INTEGRATION_TEST_DIR}/data/)
endif()
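# Generate the lit site configurations for the main regression suite and the
# unit test suite from their .in templates.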
configure_lit_site_cfg(
  ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in
  ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py
  MAIN_CONFIG
  ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py
  )
configure_lit_site_cfg(
  ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.site.cfg.py.in
  ${CMAKE_CURRENT_BINARY_DIR}/Unit/lit.site.cfg.py
  MAIN_CONFIG
  ${CMAKE_CURRENT_SOURCE_DIR}/Unit/lit.cfg.py
  )
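# Tools and libraries the regression tests invoke; check-mlir builds these
# before running lit.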
set(MLIR_TEST_DEPENDS
  FileCheck count not
  mlir-capi-execution-engine-test
  mlir-capi-ir-test
  mlir-capi-llvm-test
  mlir-capi-pass-test
  mlir-capi-sparse-tensor-test
  mlir-cpu-runner
  mlir-linalg-ods-gen
  mlir-linalg-ods-yaml-gen
  mlir-lsp-server
  mlir-opt
  mlir-reduce
  mlir-tblgen
  mlir-translate
  mlir_runner_utils
  mlir_c_runner_utils
  mlir_async_runtime
  )
if (MLIR_INCLUDE_INTEGRATION_TESTS)
  list(APPEND MLIR_TEST_DEPENDS lli)
endif()
if(MLIR_ENABLE_CUDA_RUNNER)
  list(APPEND MLIR_TEST_DEPENDS mlir_cuda_runtime)
endif()
if(MLIR_ENABLE_ROCM_RUNNER)
  list(APPEND MLIR_TEST_DEPENDS mlir_rocm_runtime)
endif()
list(APPEND MLIR_TEST_DEPENDS MLIRUnitTests)
if(LLVM_BUILD_EXAMPLES)
  list(APPEND MLIR_TEST_DEPENDS
    toyc-ch1
    toyc-ch2
    toyc-ch3
    toyc-ch4
    toyc-ch5
    toyc-ch6
    toyc-ch7
    )
endif()
if(MLIR_ENABLE_SPIRV_CPU_RUNNER)
  add_subdirectory(mlir-spirv-cpu-runner)
  list(APPEND MLIR_TEST_DEPENDS
    mlir-spirv-cpu-runner
    mlir_test_spirv_cpu_runner_c_wrappers
    )
endif()
if(MLIR_ENABLE_VULKAN_RUNNER)
  list(APPEND MLIR_TEST_DEPENDS
    mlir-vulkan-runner
    )
endif()
if(MLIR_ENABLE_BINDINGS_PYTHON)
  list(APPEND MLIR_TEST_DEPENDS
    MLIRBindingsPythonExtension
    MLIRBindingsPythonSources
    MLIRBindingsPythonTestOps
    MLIRTransformsBindingsPythonExtension
    MLIRConversionsBindingsPythonExtension
    )
endif()
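# Umbrella target that runs the full MLIR regression suite.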
add_lit_testsuite(check-mlir "Running the MLIR regression tests"
  ${CMAKE_CURRENT_BINARY_DIR}
  DEPENDS ${MLIR_TEST_DEPENDS}
  )
set_target_properties(check-mlir PROPERTIES FOLDER "Tests")
add_lit_testsuites(MLIR ${CMAKE_CURRENT_SOURCE_DIR}
  DEPENDS ${MLIR_TEST_DEPENDS}
)