[mlir][gpu] sinkOperationsIntoLaunchOp: Add user hook for isSinkingBeneficiary

Differential Revision: https://reviews.llvm.org/D119632
Ivan Butygin 2022-02-12 19:13:43 +03:00
parent 16667362d0
commit a2e2fbba17
2 changed files with 19 additions and 13 deletions

mlir/include/mlir/Dialect/GPU/Utils.h

@@ -38,7 +38,9 @@ gpu::GPUFuncOp outlineKernelFunc(gpu::LaunchOp launchOp, StringRef kernelFnName,
/// Sink operations into the `launchOp` to reduce the number of values that are
/// used within the region of the operation, but defined outside of the
/// region.
-LogicalResult sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp);
+LogicalResult sinkOperationsIntoLaunchOp(
+    gpu::LaunchOp launchOp,
+    llvm::function_ref<bool(Operation *)> isSinkingBeneficiary);
} // namespace mlir
#endif // MLIR_DIALECT_GPU_UTILS_H_
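
With this change the utility takes the beneficiary test as an argument, so callers decide which defining ops are worth cloning into the launch region. A minimal usage sketch follows; the include paths are assumed from this revision's source layout, and the wrapper name and lambda are illustrative, not part of the commit:

#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/GPU/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"

using namespace mlir;

// Sink only side-effect free index computations, mirroring the in-tree default.
static LogicalResult sinkIndexMath(gpu::LaunchOp launchOp) {
  auto beneficiary = [](Operation *op) {
    return isa<arith::ConstantOp, memref::DimOp, arith::SelectOp,
               arith::CmpIOp>(op);
  };
  return sinkOperationsIntoLaunchOp(launchOp, beneficiary);
}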

mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp

@@ -59,7 +59,7 @@ static void injectGpuIndexOperations(Location loc, Region &launchFuncOpBody,
/// Identifies operations that are beneficial to sink into kernels. These
/// operations may not have side-effects, as otherwise sinking (and hence
/// duplicating them) is not legal.
-static bool isSinkingBeneficiary(Operation *op) {
+static bool isLikelyAnIndexComputation(Operation *op) {
return isa<arith::ConstantOp, ConstantOp, memref::DimOp, arith::SelectOp,
arith::CmpIOp>(op);
}
@@ -75,11 +75,11 @@ static bool isSinkingBeneficiary(Operation *op) {
/// the order they should appear in the kernel. Furthermore, `availableValues`
/// is updated with results that will be available after sinking the identified
/// ops.
-static bool
-extractBeneficiaryOps(Operation *op,
-                      const SetVector<Value> &existingDependencies,
-                      SetVector<Operation *> &beneficiaryOps,
-                      llvm::SmallPtrSetImpl<Value> &availableValues) {
+static bool extractBeneficiaryOps(
+    Operation *op, const SetVector<Value> &existingDependencies,
+    SetVector<Operation *> &beneficiaryOps,
+    llvm::SmallPtrSetImpl<Value> &availableValues,
+    llvm::function_ref<bool(Operation *)> isSinkingBeneficiary) {
if (beneficiaryOps.count(op))
return true;
@@ -93,9 +93,9 @@ extractBeneficiaryOps(Operation *op,
// Else check whether it can be made available via sinking or already is a
// dependency.
Operation *definingOp = operand.getDefiningOp();
-    if ((!definingOp ||
-         !extractBeneficiaryOps(definingOp, existingDependencies,
-                                beneficiaryOps, availableValues)) &&
+    if ((!definingOp || !extractBeneficiaryOps(definingOp, existingDependencies,
+                                               beneficiaryOps, availableValues,
+                                               isSinkingBeneficiary)) &&
!existingDependencies.count(operand))
return false;
}
@@ -106,7 +106,10 @@ extractBeneficiaryOps(Operation *op,
return true;
}
-LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
+LogicalResult mlir::sinkOperationsIntoLaunchOp(
+    gpu::LaunchOp launchOp,
+    llvm::function_ref<bool(Operation *)> isSinkingBeneficiary) {
+  assert(isSinkingBeneficiary);
Region &launchOpBody = launchOp.body();
// Identify uses from values defined outside of the scope of the launch
@@ -120,7 +123,8 @@ LogicalResult mlir::sinkOperationsIntoLaunchOp(gpu::LaunchOp launchOp) {
Operation *operandOp = operand.getDefiningOp();
if (!operandOp)
continue;
-    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues);
+    extractBeneficiaryOps(operandOp, sinkCandidates, toBeSunk, availableValues,
+                          isSinkingBeneficiary);
}
// Insert operations so that the defs get cloned before uses.
@@ -277,7 +281,7 @@ public:
Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();
// Pull in instructions that can be sunk
-    if (failed(sinkOperationsIntoLaunchOp(op)))
+    if (failed(sinkOperationsIntoLaunchOp(op, isLikelyAnIndexComputation)))
return WalkResult::interrupt();
gpu::GPUFuncOp outlinedFunc =
outlineKernelFuncImpl(op, kernelFnName, operands);
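
The outlining pass keeps its previous behaviour by passing the renamed default, isLikelyAnIndexComputation, while the hook lets downstream pipelines substitute their own notion of "beneficial". A sketch of one hypothetical alternative (the name isCheapToRecompute and the effect-based heuristic are assumptions, not from this commit) that accepts any region-free op declaring no memory effects:

#include "mlir/Interfaces/SideEffectInterfaces.h"

using namespace mlir;

// Hypothetical downstream predicate: accept any op without regions whose
// memory-effect interface reports no effects. Such ops can be duplicated into
// the kernel without changing semantics, though re-computing them is not
// always profitable.
static bool isCheapToRecompute(Operation *op) {
  if (op->getNumRegions() != 0)
    return false;
  auto memInterface = dyn_cast<MemoryEffectOpInterface>(op);
  if (!memInterface)
    return false;
  SmallVector<MemoryEffects::EffectInstance> effects;
  memInterface.getEffects(effects);
  return effects.empty();
}

// Plugged in the same way the pass passes isLikelyAnIndexComputation:
//   if (failed(sinkOperationsIntoLaunchOp(launchOp, isCheapToRecompute)))
//     return WalkResult::interrupt();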