//===- ConvertGPULaunchFuncToVulkanLaunchFunc.cpp - MLIR conversion pass --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to convert a gpu launch function into a vulkan
// launch function. It creates a SPIR-V binary shader from the
// `spirv::ModuleOp` using the `spirv::serialize` function, and attaches the
// binary data and entry point name as attributes to the vulkan launch call
// op.
//
//===----------------------------------------------------------------------===//

#include "../PassDetail.h"
#include "mlir/Conversion/GPUToVulkan/ConvertGPUToVulkanPass.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/SPIRV/SPIRVDialect.h"
#include "mlir/Dialect/SPIRV/SPIRVOps.h"
#include "mlir/Dialect/SPIRV/Serialization.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/StandardTypes.h"
|
|
|
|
|
|
|
|
using namespace mlir;
|
|
|
|
|
|
|
|
static constexpr const char *kSPIRVBlobAttrName = "spirv_blob";
static constexpr const char *kSPIRVEntryPointAttrName = "spirv_entry_point";
static constexpr const char *kVulkanLaunch = "vulkanLaunch";

namespace {

/// A pass to convert a gpu launch op into a vulkan launch call op, by
/// creating a SPIR-V binary shader from the `spirv::ModuleOp` using the
/// `spirv::serialize` function and attaching the binary data and entry point
/// name as attributes to the created vulkan launch call op.
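///
/// For illustration only (the value names, the memref type, and the kernel
/// name below are invented, and the exact `gpu.launch_func` syntax is
/// elided): a launch of kernel `my_kernel` over a (%gx, %gy, %gz) grid with
/// one memref argument %buf becomes, roughly,
///
///   call @vulkanLaunch(%gx, %gy, %gz, %buf)
///       {spirv_blob = "...", spirv_entry_point = "my_kernel"}
///       : (index, index, index, memref<8xf32>) -> ()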
class ConvertGpuLaunchFuncToVulkanLaunchFunc
    : public ConvertGpuLaunchFuncToVulkanLaunchFuncBase<
          ConvertGpuLaunchFuncToVulkanLaunchFunc> {
public:
  void runOnOperation() override;

private:
  /// Creates a SPIR-V binary shader from the given `module` using the
  /// `spirv::serialize` function.
  LogicalResult createBinaryShader(ModuleOp module,
                                   std::vector<char> &binaryShader);

  /// Converts the given `launchOp` to a vulkan launch call.
  void convertGpuLaunchFunc(gpu::LaunchFuncOp launchOp);

  /// Checks whether the given type is supported by the Vulkan runtime.
  bool isSupportedType(Type type) {
    if (auto memRefType = type.dyn_cast_or_null<MemRefType>()) {
      auto elementType = memRefType.getElementType();
      return memRefType.hasRank() &&
             (memRefType.getRank() >= 1 && memRefType.getRank() <= 3) &&
             (elementType.isIntOrFloat());
    }
    return false;
  }

  /// Declares the vulkan launch function. Returns an error if any operand
  /// type is unsupported by the Vulkan runtime.
  LogicalResult declareVulkanLaunchFunc(Location loc,
                                        gpu::LaunchFuncOp launchOp);

private:
  /// The number of vulkan launch configuration operands, placed at the leading
  /// positions of the operand list.
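  /// These are the grid (number-of-workgroups) sizes along x, y, and z.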
  static constexpr unsigned kVulkanLaunchNumConfigOperands = 3;
};

} // anonymous namespace

void ConvertGpuLaunchFuncToVulkanLaunchFunc::runOnOperation() {
  bool done = false;
  getOperation().walk([this, &done](gpu::LaunchFuncOp op) {
    if (done) {
      op.emitError("should only contain one 'gpu::LaunchFuncOp' op");
      return signalPassFailure();
    }
    done = true;
    convertGpuLaunchFunc(op);
  });

  // Erase `gpu::GPUModuleOp` and `spirv::Module` operations.
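  // The serialized blob attached to the vulkanLaunch call carries everything
  // the runtime needs from them, so the kernel modules can simply be dropped.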
  for (auto gpuModule :
       llvm::make_early_inc_range(getOperation().getOps<gpu::GPUModuleOp>()))
    gpuModule.erase();

  for (auto spirvModule :
       llvm::make_early_inc_range(getOperation().getOps<spirv::ModuleOp>()))
    spirvModule.erase();
}

LogicalResult ConvertGpuLaunchFuncToVulkanLaunchFunc::declareVulkanLaunchFunc(
    Location loc, gpu::LaunchFuncOp launchOp) {
  OpBuilder builder(getOperation().getBody()->getTerminator());

  // The workgroup size is written into the kernel, so to properly model a
  // vulkan launch we have to skip the local workgroup size configuration
  // here.
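  // In other words (operand names here are illustrative): gpu.launch_func
  // carries (gridX, gridY, gridZ, blockX, blockY, blockZ, kernelArgs...),
  // while the vulkanLaunch callee only takes (gridX, gridY, gridZ,
  // kernelArgs...).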
  SmallVector<Type, 8> gpuLaunchTypes(launchOp.getOperandTypes());
  // The first kVulkanLaunchNumConfigOperands of the gpu.launch_func op are the
  // same as the config operands for the vulkan launch call op.
  SmallVector<Type, 8> vulkanLaunchTypes(gpuLaunchTypes.begin(),
                                         gpuLaunchTypes.begin() +
                                             kVulkanLaunchNumConfigOperands);
  vulkanLaunchTypes.append(gpuLaunchTypes.begin() +
                               gpu::LaunchOp::kNumConfigOperands,
                           gpuLaunchTypes.end());

  // Check that all operands have supported types except those for the
  // launch configuration.
  for (auto type :
       llvm::drop_begin(vulkanLaunchTypes, kVulkanLaunchNumConfigOperands)) {
    if (!isSupportedType(type))
      return launchOp.emitError() << type << " is unsupported to run on Vulkan";
  }

  // Declare vulkan launch function.
  auto funcType = FunctionType::get(vulkanLaunchTypes, {}, loc->getContext());
  builder.create<FuncOp>(loc, kVulkanLaunch, funcType).setPrivate();

  return success();
}

LogicalResult ConvertGpuLaunchFuncToVulkanLaunchFunc::createBinaryShader(
    ModuleOp module, std::vector<char> &binaryShader) {
  bool done = false;
  SmallVector<uint32_t, 0> binary;
  for (auto spirvModule : module.getOps<spirv::ModuleOp>()) {
    if (done)
      return spirvModule.emitError("should only contain one 'spv.module' op");
    done = true;

    if (failed(spirv::serialize(spirvModule, binary)))
      return failure();
  }
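  // The serializer produces 32-bit SPIR-V words; repack them into the byte
  // buffer expected by the caller.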
  binaryShader.resize(binary.size() * sizeof(uint32_t));
  std::memcpy(binaryShader.data(), reinterpret_cast<char *>(binary.data()),
              binaryShader.size());
  return success();
}

void ConvertGpuLaunchFuncToVulkanLaunchFunc::convertGpuLaunchFunc(
    gpu::LaunchFuncOp launchOp) {
  ModuleOp module = getOperation();
  OpBuilder builder(launchOp);
  Location loc = launchOp.getLoc();

  // Serialize `spirv::Module` into binary form.
  std::vector<char> binary;
  if (failed(createBinaryShader(module, binary)))
    return signalPassFailure();

  // Declare vulkan launch function.
  if (failed(declareVulkanLaunchFunc(loc, launchOp)))
    return signalPassFailure();
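
  // Mirror the type handling in declareVulkanLaunchFunc: keep the grid-size
  // configuration operands and the kernel arguments, and drop the block-size
  // (local workgroup size) operands.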
  SmallVector<Value, 8> gpuLaunchOperands(launchOp.getOperands());
  SmallVector<Value, 8> vulkanLaunchOperands(
      gpuLaunchOperands.begin(),
      gpuLaunchOperands.begin() + kVulkanLaunchNumConfigOperands);
  vulkanLaunchOperands.append(gpuLaunchOperands.begin() +
                                  gpu::LaunchOp::kNumConfigOperands,
                              gpuLaunchOperands.end());

  // Create vulkan launch call op.
  auto vulkanLaunchCallOp = builder.create<CallOp>(
      loc, TypeRange{}, builder.getSymbolRefAttr(kVulkanLaunch),
      vulkanLaunchOperands);
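
  // The two attributes set below are what the subsequent lowering of the
  // vulkanLaunch call to Vulkan runtime calls is expected to read in order to
  // bind the shader blob and its entry point.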
|
2020-03-11 03:17:07 +08:00
|
|
|
|
|
|
|
// Set SPIR-V binary shader data as an attribute.
|
|
|
|
vulkanLaunchCallOp.setAttr(
|
|
|
|
kSPIRVBlobAttrName,
|
|
|
|
StringAttr::get({binary.data(), binary.size()}, loc->getContext()));
|
|
|
|
|
|
|
|
// Set entry point name as an attribute.
|
|
|
|
vulkanLaunchCallOp.setAttr(
|
|
|
|
kSPIRVEntryPointAttrName,
|
2020-04-21 18:16:41 +08:00
|
|
|
StringAttr::get(launchOp.getKernelName(), loc->getContext()));

  launchOp.erase();
}

std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>>
mlir::createConvertGpuLaunchFuncToVulkanLaunchFuncPass() {
  return std::make_unique<ConvertGpuLaunchFuncToVulkanLaunchFunc>();
}