//===-- AMDGPUTargetMachine.cpp - TargetMachine for hw codegen targets-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// The AMDGPU target machine contains all of the hardware specific
/// information needed to emit code for R600 and SI GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetMachine.h"
#include "AMDGPU.h"
#include "AMDGPUAliasAnalysis.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUMacroFusion.h"
#include "AMDGPUTargetObjectFile.h"
#include "AMDGPUTargetTransformInfo.h"
#include "GCNIterativeScheduler.h"
#include "GCNSchedStrategy.h"
#include "R600MachineScheduler.h"
#include "SIMachineScheduler.h"
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelect.h"
#include "llvm/CodeGen/GlobalISel/Legalizer.h"
#include "llvm/CodeGen/GlobalISel/RegBankSelect.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Vectorize.h"
#include <memory>

using namespace llvm;

static cl::opt<bool> EnableR600StructurizeCFG(
  "r600-ir-structurize",
  cl::desc("Use StructurizeCFG IR pass"),
  cl::init(true));

static cl::opt<bool> EnableSROA(
  "amdgpu-sroa",
  cl::desc("Run SROA after promote alloca pass"),
  cl::ReallyHidden,
  cl::init(true));

static cl::opt<bool>
EnableEarlyIfConversion("amdgpu-early-ifcvt", cl::Hidden,
                        cl::desc("Run early if-conversion"),
                        cl::init(false));

static cl::opt<bool> EnableR600IfConvert(
  "r600-if-convert",
  cl::desc("Use if conversion pass"),
  cl::ReallyHidden,
  cl::init(true));

// Option to disable vectorizer for tests.
static cl::opt<bool> EnableLoadStoreVectorizer(
  "amdgpu-load-store-vectorizer",
  cl::desc("Enable load store vectorizer"),
  cl::init(true),
  cl::Hidden);

// Option to control global loads scalarization
static cl::opt<bool> ScalarizeGlobal(
  "amdgpu-scalarize-global-loads",
  cl::desc("Enable global load scalarization"),
  cl::init(true),
  cl::Hidden);

// Option to run internalize pass.
static cl::opt<bool> InternalizeSymbols(
  "amdgpu-internalize-symbols",
  cl::desc("Enable elimination of non-kernel functions and unused globals"),
  cl::init(false),
  cl::Hidden);

// Option to inline all early.
static cl::opt<bool> EarlyInlineAll(
  "amdgpu-early-inline-all",
  cl::desc("Inline all functions early"),
  cl::init(false),
  cl::Hidden);

static cl::opt<bool> EnableSDWAPeephole(
  "amdgpu-sdwa-peephole",
  cl::desc("Enable SDWA peepholer"),
  cl::init(true));

static cl::opt<bool> EnableDPPCombine(
  "amdgpu-dpp-combine",
  cl::desc("Enable DPP combiner"),
  cl::init(true));

// Enable address space based alias analysis
static cl::opt<bool> EnableAMDGPUAliasAnalysis("enable-amdgpu-aa", cl::Hidden,
  cl::desc("Enable AMDGPU Alias Analysis"),
  cl::init(true));

// Option to run late CFG structurizer
static cl::opt<bool, true> LateCFGStructurize(
  "amdgpu-late-structurize",
  cl::desc("Enable late CFG structurization"),
  cl::location(AMDGPUTargetMachine::EnableLateStructurizeCFG),
  cl::Hidden);

static cl::opt<bool, true> EnableAMDGPUFunctionCalls(
  "amdgpu-function-calls",
  cl::desc("Enable AMDGPU function call support"),
  cl::location(AMDGPUTargetMachine::EnableFunctionCalls),
  cl::init(false),
  cl::Hidden);

// Enable library call simplifications
static cl::opt<bool> EnableLibCallSimplify(
  "amdgpu-simplify-libcall",
  cl::desc("Enable amdgpu library simplifications"),
  cl::init(true),
  cl::Hidden);

static cl::opt<bool> EnableLowerKernelArguments(
  "amdgpu-ir-lower-kernel-arguments",
  cl::desc("Lower kernel argument loads in IR pass"),
  cl::init(true),
  cl::Hidden);

// Enable atomic optimization
static cl::opt<bool> EnableAtomicOptimizations(
  "amdgpu-atomic-optimizations",
  cl::desc("Enable atomic optimizations"),
  cl::init(false),
  cl::Hidden);

// Enable Mode register optimization
static cl::opt<bool> EnableSIModeRegisterPass(
  "amdgpu-mode-register",
  cl::desc("Enable mode register pass"),
  cl::init(true),
  cl::Hidden);
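
// All of the options above are developer/testing knobs. As a rough sketch
// (assuming the usual cl::opt boolean spelling), they can be toggled on the
// llc command line, e.g.:
//   llc -march=amdgcn -mcpu=gfx900 -amdgpu-sdwa-peephole=0 -amdgpu-sroa=0 in.ll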

extern "C" void LLVMInitializeAMDGPUTarget() {
  // Register the target
  RegisterTargetMachine<R600TargetMachine> X(getTheAMDGPUTarget());
  RegisterTargetMachine<GCNTargetMachine> Y(getTheGCNTarget());

  PassRegistry *PR = PassRegistry::getPassRegistry();
  initializeR600ClauseMergePassPass(*PR);
  initializeR600ControlFlowFinalizerPass(*PR);
  initializeR600PacketizerPass(*PR);
  initializeR600ExpandSpecialInstrsPassPass(*PR);
  initializeR600VectorRegMergerPass(*PR);
  initializeGlobalISel(*PR);
  initializeAMDGPUDAGToDAGISelPass(*PR);
  initializeGCNDPPCombinePass(*PR);
  initializeSILowerI1CopiesPass(*PR);
  initializeSIFixSGPRCopiesPass(*PR);
  initializeSIFixVGPRCopiesPass(*PR);
  initializeSIFixupVectorISelPass(*PR);
  initializeSIFoldOperandsPass(*PR);
  initializeSIPeepholeSDWAPass(*PR);
  initializeSIShrinkInstructionsPass(*PR);
  initializeSIOptimizeExecMaskingPreRAPass(*PR);
  initializeSILoadStoreOptimizerPass(*PR);
  initializeAMDGPUFixFunctionBitcastsPass(*PR);
  initializeAMDGPUAlwaysInlinePass(*PR);
  initializeAMDGPUAnnotateKernelFeaturesPass(*PR);
  initializeAMDGPUAnnotateUniformValuesPass(*PR);
  initializeAMDGPUArgumentUsageInfoPass(*PR);
  initializeAMDGPUAtomicOptimizerPass(*PR);
  initializeAMDGPULowerKernelArgumentsPass(*PR);
  initializeAMDGPULowerKernelAttributesPass(*PR);
  initializeAMDGPULowerIntrinsicsPass(*PR);
  initializeAMDGPUOpenCLEnqueuedBlockLoweringPass(*PR);
  initializeAMDGPUPromoteAllocaPass(*PR);
  initializeAMDGPUCodeGenPreparePass(*PR);
  initializeAMDGPURewriteOutArgumentsPass(*PR);
  initializeAMDGPUUnifyMetadataPass(*PR);
  initializeSIAnnotateControlFlowPass(*PR);
  initializeSIInsertWaitcntsPass(*PR);
  initializeSIModeRegisterPass(*PR);
  initializeSIWholeQuadModePass(*PR);
  initializeSILowerControlFlowPass(*PR);
  initializeSIInsertSkipsPass(*PR);
  initializeSIMemoryLegalizerPass(*PR);
  initializeSIOptimizeExecMaskingPass(*PR);
  initializeSIFixWWMLivenessPass(*PR);
  initializeSIFormMemoryClausesPass(*PR);
  initializeAMDGPUUnifyDivergentExitNodesPass(*PR);
  initializeAMDGPUAAWrapperPassPass(*PR);
  initializeAMDGPUExternalAAWrapperPass(*PR);
  initializeAMDGPUUseNativeCallsPass(*PR);
  initializeAMDGPUSimplifyLibCallsPass(*PR);
  initializeAMDGPUInlinerPass(*PR);
}

static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
  return llvm::make_unique<AMDGPUTargetObjectFile>();
}

static ScheduleDAGInstrs *createR600MachineScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, llvm::make_unique<R600SchedStrategy>());
}

static ScheduleDAGInstrs *createSIMachineScheduler(MachineSchedContext *C) {
  return new SIScheduleDAGMI(C);
}
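
// The default GCN scheduler below wraps the max-occupancy strategy with DAG
// mutations that cluster adjacent loads/stores and apply AMDGPU macro fusion.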
static ScheduleDAGInstrs *
createGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
    new GCNScheduleDAGMILive(C, make_unique<GCNMaxOccupancySchedStrategy>(C));
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static ScheduleDAGInstrs *
createIterativeGCNMaxOccupancyMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_LEGACYMAXOCCUPANCY);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  return DAG;
}

static ScheduleDAGInstrs *createMinRegScheduler(MachineSchedContext *C) {
  return new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_MINREGFORCED);
}

static ScheduleDAGInstrs *
createIterativeILPMachineScheduler(MachineSchedContext *C) {
  auto DAG = new GCNIterativeScheduler(C,
    GCNIterativeScheduler::SCHEDULE_ILP);
  DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  DAG->addMutation(createAMDGPUMacroFusionDAGMutation());
  return DAG;
}

static MachineSchedRegistry
R600SchedRegistry("r600", "Run R600's custom scheduler",
                   createR600MachineScheduler);

static MachineSchedRegistry
SISchedRegistry("si", "Run SI's custom scheduler",
                createSIMachineScheduler);

static MachineSchedRegistry
GCNMaxOccupancySchedRegistry("gcn-max-occupancy",
                             "Run GCN scheduler to maximize occupancy",
                             createGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
IterativeGCNMaxOccupancySchedRegistry("gcn-max-occupancy-experimental",
  "Run GCN scheduler to maximize occupancy (experimental)",
  createIterativeGCNMaxOccupancyMachineScheduler);

static MachineSchedRegistry
GCNMinRegSchedRegistry("gcn-minreg",
  "Run GCN iterative scheduler for minimal register usage (experimental)",
  createMinRegScheduler);

static MachineSchedRegistry
GCNILPSchedRegistry("gcn-ilp",
  "Run GCN iterative scheduler for ILP scheduling (experimental)",
  createIterativeILPMachineScheduler);
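
// The registries above make these schedulers selectable by name on the
// command line (assuming the standard machine-scheduler flag), e.g.
// -misched=gcn-max-occupancy or -misched=si.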

static StringRef computeDataLayout(const Triple &TT) {
  if (TT.getArch() == Triple::r600) {
    // 32-bit pointers.
    return "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
           "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
  }

  // 32-bit private, local, and region pointers. 64-bit global, constant and
  // flat.
  return "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32"
         "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
         "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5";
}

LLVM_READNONE
static StringRef getGPUOrDefault(const Triple &TT, StringRef GPU) {
  if (!GPU.empty())
    return GPU;

  if (TT.getArch() == Triple::amdgcn)
    return "generic";

  return "r600";
}

static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  // The AMDGPU toolchain only supports generating shared objects, so we
  // must always use PIC.
  return Reloc::PIC_;
}

AMDGPUTargetMachine::AMDGPUTargetMachine(const Target &T, const Triple &TT,
                                         StringRef CPU, StringRef FS,
                                         TargetOptions Options,
                                         Optional<Reloc::Model> RM,
                                         Optional<CodeModel::Model> CM,
                                         CodeGenOpt::Level OptLevel)
    : LLVMTargetMachine(T, computeDataLayout(TT), TT, getGPUOrDefault(TT, CPU),
                        FS, Options, getEffectiveRelocModel(RM),
                        getEffectiveCodeModel(CM, CodeModel::Small), OptLevel),
      TLOF(createTLOF(getTargetTriple())) {
  initAsmInfo();
}

bool AMDGPUTargetMachine::EnableLateStructurizeCFG = false;
bool AMDGPUTargetMachine::EnableFunctionCalls = false;

AMDGPUTargetMachine::~AMDGPUTargetMachine() = default;

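// The helpers below prefer the per-function "target-cpu" and "target-features"
// attributes and fall back to the defaults this TargetMachine was created
// with when an attribute is absent.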
StringRef AMDGPUTargetMachine::getGPUName(const Function &F) const {
  Attribute GPUAttr = F.getFnAttribute("target-cpu");
  return GPUAttr.hasAttribute(Attribute::None) ?
    getTargetCPU() : GPUAttr.getValueAsString();
}

StringRef AMDGPUTargetMachine::getFeatureString(const Function &F) const {
  Attribute FSAttr = F.getFnAttribute("target-features");

  return FSAttr.hasAttribute(Attribute::None) ?
    getTargetFeatureString() :
    FSAttr.getValueAsString();
}

/// Predicate for Internalize pass.
static bool mustPreserveGV(const GlobalValue &GV) {
  if (const Function *F = dyn_cast<Function>(&GV))
    return F->isDeclaration() || AMDGPU::isEntryFunctionCC(F->getCallingConv());

  return !GV.use_empty();
}

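// Hook AMDGPU-specific IR passes into the middle-end optimizer through
// PassManagerBuilder extension points: alias analysis and internalization
// early in the module pipeline, libcall simplification as early as possible,
// and address-space inference late in the CGSCC pipeline, after inlining.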
void AMDGPUTargetMachine::adjustPassManager(PassManagerBuilder &Builder) {
  Builder.DivergentTarget = true;

  bool EnableOpt = getOptLevel() > CodeGenOpt::None;
  bool Internalize = InternalizeSymbols;
  bool EarlyInline = EarlyInlineAll && EnableOpt && !EnableAMDGPUFunctionCalls;
  bool AMDGPUAA = EnableAMDGPUAliasAnalysis && EnableOpt;
  bool LibCallSimplify = EnableLibCallSimplify && EnableOpt;

  if (EnableAMDGPUFunctionCalls) {
    delete Builder.Inliner;
    Builder.Inliner = createAMDGPUFunctionInliningPass();
  }

  Builder.addExtension(
    PassManagerBuilder::EP_ModuleOptimizerEarly,
    [Internalize, EarlyInline, AMDGPUAA](const PassManagerBuilder &,
                                         legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(createAMDGPUUnifyMetadataPass());
      if (Internalize) {
        PM.add(createInternalizePass(mustPreserveGV));
        PM.add(createGlobalDCEPass());
      }
      if (EarlyInline)
        PM.add(createAMDGPUAlwaysInlinePass(false));
  });

  const auto &Opt = Options;
  Builder.addExtension(
    PassManagerBuilder::EP_EarlyAsPossible,
    [AMDGPUAA, LibCallSimplify, &Opt](const PassManagerBuilder &,
                                      legacy::PassManagerBase &PM) {
      if (AMDGPUAA) {
        PM.add(createAMDGPUAAWrapperPass());
        PM.add(createAMDGPUExternalAAWrapperPass());
      }
      PM.add(llvm::createAMDGPUUseNativeCallsPass());
      if (LibCallSimplify)
        PM.add(llvm::createAMDGPUSimplifyLibCallsPass(Opt));
  });

  Builder.addExtension(
    PassManagerBuilder::EP_CGSCCOptimizerLate,
    [](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
      // Add infer address spaces pass to the opt pipeline after inlining
      // but before SROA to increase SROA opportunities.
      PM.add(createInferAddressSpacesPass());

      // This should run after inlining to have any chance of doing anything,
      // and before other cleanup optimizations.
      PM.add(createAMDGPULowerKernelAttributesPass());
  });
}

//===----------------------------------------------------------------------===//
// R600 Target Machine (R600 -> Cayman)
//===----------------------------------------------------------------------===//

R600TargetMachine::R600TargetMachine(const Target &T, const Triple &TT,
                                     StringRef CPU, StringRef FS,
                                     TargetOptions Options,
                                     Optional<Reloc::Model> RM,
                                     Optional<CodeModel::Model> CM,
                                     CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {
  setRequiresStructuredCFG(true);
}

const R600Subtarget *R600TargetMachine::getSubtargetImpl(
  const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<R600Subtarget>(TargetTriple, GPU, FS, *this);
  }

  return I.get();
}

TargetTransformInfo
R600TargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(R600TTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// GCN Target Machine (SI+)
//===----------------------------------------------------------------------===//

GCNTargetMachine::GCNTargetMachine(const Target &T, const Triple &TT,
                                   StringRef CPU, StringRef FS,
                                   TargetOptions Options,
                                   Optional<Reloc::Model> RM,
                                   Optional<CodeModel::Model> CM,
                                   CodeGenOpt::Level OL, bool JIT)
    : AMDGPUTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL) {}

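// Subtargets are cached per (GPU name, feature string) key, so functions with
// identical target attributes share a single GCNSubtarget instance.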
const GCNSubtarget *GCNTargetMachine::getSubtargetImpl(const Function &F) const {
  StringRef GPU = getGPUName(F);
  StringRef FS = getFeatureString(F);

  SmallString<128> SubtargetKey(GPU);
  SubtargetKey.append(FS);

  auto &I = SubtargetMap[SubtargetKey];
  if (!I) {
    // This needs to be done before we create a new subtarget since any
    // creation will depend on the TM and the code generation flags on the
    // function that reside in TargetOptions.
    resetTargetOptions(F);
    I = llvm::make_unique<GCNSubtarget>(TargetTriple, GPU, FS, *this);
  }

  I->setScalarizeGlobalBehavior(ScalarizeGlobal);

  return I.get();
}

TargetTransformInfo
GCNTargetMachine::getTargetTransformInfo(const Function &F) {
  return TargetTransformInfo(GCNTTIImpl(this, F));
}

//===----------------------------------------------------------------------===//
// AMDGPU Pass Setup
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUPassConfig : public TargetPassConfig {
public:
  AMDGPUPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {
    // Exceptions and StackMaps are not supported, so these passes will never do
    // anything.
    disablePass(&StackMapLivenessID);
    disablePass(&FuncletLayoutID);
  }

  AMDGPUTargetMachine &getAMDGPUTargetMachine() const {
    return getTM<AMDGPUTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override {
    ScheduleDAGMILive *DAG = createGenericSchedLive(C);
    DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
    return DAG;
  }

  void addEarlyCSEOrGVNPass();
  void addStraightLineScalarOptimizationPasses();
  void addIRPasses() override;
  void addCodeGenPrepare() override;
  bool addPreISel() override;
  bool addInstSelector() override;
  bool addGCPasses() override;
};

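// R600 and GCN each refine the common pass config above: R600 keeps the
// legacy AMDGPU CFG structurizer flow, while GCN adds the GlobalISel hooks
// and the SI-specific machine passes.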
class R600PassConfig final : public AMDGPUPassConfig {
public:
  R600PassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {}

  ScheduleDAGInstrs *createMachineScheduler(
    MachineSchedContext *C) const override {
    return createR600MachineScheduler(C);
  }

  bool addPreISel() override;
  bool addInstSelector() override;
  void addPreRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

class GCNPassConfig final : public AMDGPUPassConfig {
public:
  GCNPassConfig(LLVMTargetMachine &TM, PassManagerBase &PM)
    : AMDGPUPassConfig(TM, PM) {
    // It is necessary to know the register usage of the entire call graph. We
    // allow calls without EnableAMDGPUFunctionCalls if they are marked
    // noinline, so this is always required.
    setRequiresCodeGenSCCOrder(true);
  }

  GCNTargetMachine &getGCNTargetMachine() const {
    return getTM<GCNTargetMachine>();
  }

  ScheduleDAGInstrs *
  createMachineScheduler(MachineSchedContext *C) const override;

  bool addPreISel() override;
  void addMachineSSAOptimization() override;
  bool addILPOpts() override;
  bool addInstSelector() override;
  bool addIRTranslator() override;
  bool addLegalizeMachineIR() override;
  bool addRegBankSelect() override;
  bool addGlobalInstructionSelect() override;
  void addFastRegAlloc(FunctionPass *RegAllocPass) override;
  void addOptimizedRegAlloc(FunctionPass *RegAllocPass) override;
  void addPreRegAlloc() override;
  void addPostRegAlloc() override;
  void addPreSched2() override;
  void addPreEmitPass() override;
};

} // end anonymous namespace

void AMDGPUPassConfig::addEarlyCSEOrGVNPass() {
  if (getOptLevel() == CodeGenOpt::Aggressive)
    addPass(createGVNPass());
  else
    addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addStraightLineScalarOptimizationPasses() {
  addPass(createLICMPass());
  addPass(createSeparateConstOffsetFromGEPPass());
  addPass(createSpeculativeExecutionPass());
  // ReassociateGEPs exposes more opportunities for SLSR. See
  // the example in reassociate-geps-and-slsr.ll.
  addPass(createStraightLineStrengthReducePass());
  // SeparateConstOffsetFromGEP and SLSR create common expressions which GVN or
  // EarlyCSE can reuse.
  addEarlyCSEOrGVNPass();
  // Run NaryReassociate after EarlyCSE/GVN to be more effective.
  addPass(createNaryReassociatePass());
  // NaryReassociate on GEPs creates redundant common expressions, so run
  // EarlyCSE after it.
  addPass(createEarlyCSEPass());
}

void AMDGPUPassConfig::addIRPasses() {
  const AMDGPUTargetMachine &TM = getAMDGPUTargetMachine();

  // There is no reason to run these.
  disablePass(&StackMapLivenessID);
  disablePass(&FuncletLayoutID);
  disablePass(&PatchableFunctionID);

  addPass(createAtomicExpandPass());

  // This must occur before inlining, as the inliner will not look through
  // bitcast calls.
  addPass(createAMDGPUFixFunctionBitcastsPass());

  addPass(createAMDGPULowerIntrinsicsPass());

  // Function calls are not supported, so make sure we inline everything.
  addPass(createAMDGPUAlwaysInlinePass());
  addPass(createAlwaysInlinerLegacyPass());
  // We need to add the barrier noop pass, otherwise adding the function
  // inlining pass will cause all of the PassConfig's passes to be run
  // one function at a time, which means if we have a module with two
  // functions, then we will generate code for the first function
  // without ever running any passes on the second.
  addPass(createBarrierNoopPass());

  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    // TODO: May want to move later or split into an early and late one.
    addPass(createAMDGPUCodeGenPreparePass());
  }

  // Handle uses of OpenCL image2d_t, image3d_t and sampler_t arguments.
  if (TM.getTargetTriple().getArch() == Triple::r600)
    addPass(createR600OpenCLImageTypeLoweringPass());

  // Replace OpenCL enqueued block function pointers with global variables.
  addPass(createAMDGPUOpenCLEnqueuedBlockLoweringPass());

  if (TM.getOptLevel() > CodeGenOpt::None) {
    addPass(createInferAddressSpacesPass());
    addPass(createAMDGPUPromoteAlloca());

    if (EnableSROA)
      addPass(createSROAPass());

    addStraightLineScalarOptimizationPasses();

    if (EnableAMDGPUAliasAnalysis) {
      addPass(createAMDGPUAAWrapperPass());
      addPass(createExternalAAWrapperPass([](Pass &P, Function &,
                                             AAResults &AAR) {
        if (auto *WrapperPass = P.getAnalysisIfAvailable<AMDGPUAAWrapperPass>())
          AAR.addAAResult(WrapperPass->getResult());
        }));
    }
  }

  TargetPassConfig::addIRPasses();

  // EarlyCSE is not always strong enough to clean up what LSR produces. For
  // example, GVN can combine
  //
  //   %0 = add %a, %b
  //   %1 = add %b, %a
  //
  // and
  //
  //   %0 = shl nsw %a, 2
  //   %1 = shl %a, 2
  //
  // but EarlyCSE can do neither of them.
  if (getOptLevel() != CodeGenOpt::None)
    addEarlyCSEOrGVNPass();
}

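// Lower kernel arguments to loads in IR (amdgcn only) before the generic
// CodeGenPrepare runs; the load/store vectorizer can then merge the argument
// loads it exposes.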
void AMDGPUPassConfig::addCodeGenPrepare() {
  if (TM->getTargetTriple().getArch() == Triple::amdgcn &&
      EnableLowerKernelArguments)
    addPass(createAMDGPULowerKernelArgumentsPass());

  TargetPassConfig::addCodeGenPrepare();

  if (EnableLoadStoreVectorizer)
    addPass(createLoadStoreVectorizerPass());
}

bool AMDGPUPassConfig::addPreISel() {
  addPass(createLowerSwitchPass());
  addPass(createFlattenCFGPass());
  return false;
}

bool AMDGPUPassConfig::addInstSelector() {
  addPass(createAMDGPUISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

bool AMDGPUPassConfig::addGCPasses() {
  // Do nothing. GC is not supported.
  return false;
}

//===----------------------------------------------------------------------===//
// R600 Pass Setup
//===----------------------------------------------------------------------===//

bool R600PassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableR600StructurizeCFG)
    addPass(createStructurizeCFGPass());
  return false;
}

bool R600PassConfig::addInstSelector() {
  addPass(createR600ISelDag(&getAMDGPUTargetMachine(), getOptLevel()));
  return false;
}

void R600PassConfig::addPreRegAlloc() {
  addPass(createR600VectorRegMerger());
}

void R600PassConfig::addPreSched2() {
  addPass(createR600EmitClauseMarkers(), false);
  if (EnableR600IfConvert)
    addPass(&IfConverterID, false);
  addPass(createR600ClauseMergePass(), false);
}

void R600PassConfig::addPreEmitPass() {
  addPass(createAMDGPUCFGStructurizerPass(), false);
  addPass(createR600ExpandSpecialInstrsPass(), false);
  addPass(&FinalizeMachineBundlesID, false);
  addPass(createR600Packetizer(), false);
  addPass(createR600ControlFlowFinalizer(), false);
}

TargetPassConfig *R600TargetMachine::createPassConfig(PassManagerBase &PM) {
  return new R600PassConfig(*this, PM);
}

//===----------------------------------------------------------------------===//
// GCN Pass Setup
//===----------------------------------------------------------------------===//

ScheduleDAGInstrs *GCNPassConfig::createMachineScheduler(
  MachineSchedContext *C) const {
  const GCNSubtarget &ST = C->MF->getSubtarget<GCNSubtarget>();
  if (ST.enableSIScheduler())
    return createSIMachineScheduler(C);
  return createGCNMaxOccupancyMachineScheduler(C);
}

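// Unless late CFG structurization is requested, GCN structurizes control flow
// in IR (UnifyDivergentExitNodes, StructurizeCFG, SIAnnotateControlFlow)
// before instruction selection.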
bool GCNPassConfig::addPreISel() {
  AMDGPUPassConfig::addPreISel();

  if (EnableAtomicOptimizations) {
    addPass(createAMDGPUAtomicOptimizerPass());
  }

  // FIXME: We need to run a pass to propagate the attributes when calls are
  // supported.
  addPass(createAMDGPUAnnotateKernelFeaturesPass());

  // Merge divergent exit nodes. StructurizeCFG won't recognize the multi-exit
  // regions formed by them.
  addPass(&AMDGPUUnifyDivergentExitNodesID);
  if (!LateCFGStructurize) {
    addPass(createStructurizeCFGPass(true)); // true -> SkipUniformRegions
  }
  addPass(createSinkingPass());
  addPass(createAMDGPUAnnotateUniformValues());
  if (!LateCFGStructurize) {
    addPass(createSIAnnotateControlFlowPass());
  }

  return false;
}

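// Machine-SSA cleanup for GCN: fold operands, run the optional DPP and SDWA
// peepholes, and repeat folding/DCE afterwards to clean up the rewritten
// instructions.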
2016-04-15 05:58:24 +08:00
|
|
|
void GCNPassConfig::addMachineSSAOptimization() {
|
|
|
|
TargetPassConfig::addMachineSSAOptimization();
|
|
|
|
|
|
|
|
// We want to fold operands after PeepholeOptimizer has run (or as part of
|
|
|
|
// it), because it will eliminate extra copies making it easier to fold the
|
|
|
|
// real source operand. We want to eliminate dead instructions after, so that
|
|
|
|
// we see fewer uses of the copies. We then need to clean up the dead
|
|
|
|
// instructions leftover after the operands are folded as well.
|
|
|
|
//
|
|
|
|
// XXX - Can we get away without running DeadMachineInstructionElim again?
|
|
|
|
addPass(&SIFoldOperandsID);
|
2018-11-30 22:21:56 +08:00
|
|
|
if (EnableDPPCombine)
|
|
|
|
addPass(&GCNDPPCombineID);
|
2016-04-15 05:58:24 +08:00
|
|
|
addPass(&DeadMachineInstructionElimID);
|
2016-08-30 03:15:22 +08:00
|
|
|
addPass(&SILoadStoreOptimizerID);
|
2017-04-07 18:53:12 +08:00
|
|
|
if (EnableSDWAPeephole) {
|
|
|
|
addPass(&SIPeepholeSDWAID);
|
2018-01-19 14:46:10 +08:00
|
|
|
addPass(&EarlyMachineLICMID);
|
2017-05-31 00:49:24 +08:00
|
|
|
addPass(&MachineCSEID);
|
|
|
|
addPass(&SIFoldOperandsID);
|
2017-04-07 18:53:12 +08:00
|
|
|
addPass(&DeadMachineInstructionElimID);
|
|
|
|
}
|
2017-06-04 01:39:47 +08:00
|
|
|
addPass(createSIShrinkInstructionsPass());
|
2016-04-15 05:58:24 +08:00
|
|
|
}
|
|
|
|
|
2017-01-25 12:25:02 +08:00
|
|
|
bool GCNPassConfig::addILPOpts() {
|
|
|
|
if (EnableEarlyIfConversion)
|
|
|
|
addPass(&EarlyIfConverterID);
|
|
|
|
|
|
|
|
TargetPassConfig::addILPOpts();
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-02-12 01:11:51 +08:00
|
|
|
bool GCNPassConfig::addInstSelector() {
|
|
|
|
AMDGPUPassConfig::addInstSelector();
|
2015-11-04 06:30:13 +08:00
|
|
|
addPass(&SIFixSGPRCopiesID);
  addPass(createSILowerI1CopiesPass());
  addPass(createSIFixupVectorISelPass());
  addPass(createSIAddIMGInitPass());
  return false;
}

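// GlobalISel hooks: IR translation, legalization, register bank selection and
// global instruction selection.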
bool GCNPassConfig::addIRTranslator() {
  addPass(new IRTranslator());
  return false;
}

bool GCNPassConfig::addLegalizeMachineIR() {
  addPass(new Legalizer());
  return false;
}

bool GCNPassConfig::addRegBankSelect() {
  addPass(new RegBankSelect());
  return false;
}

bool GCNPassConfig::addGlobalInstructionSelect() {
  addPass(new InstructionSelect());
  return false;
}

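// Passes that must run before register allocation: late CFG structurization
// (when requested) and whole quad mode insertion.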
void GCNPassConfig::addPreRegAlloc() {
  if (LateCFGStructurize) {
    addPass(createAMDGPUMachineCFGStructurizerPass());
  }
  addPass(createSIWholeQuadModePass());
}

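// Fast (-O0) register allocation path. The SI control-flow lowering passes
// still have to be threaded in at fixed points after PHI elimination.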
void GCNPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
  // FIXME: We have to disable the verifier here because of PHIElimination +
  // TwoAddressInstructions disabling it.

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addFastRegAlloc(RegAllocPass);
}

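// Optimized register allocation path. EXEC mask and memory clause passes are
// inserted right after the machine scheduler, followed by the same
// control-flow lowering as the fast path.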
void GCNPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
  insertPass(&MachineSchedulerID, &SIOptimizeExecMaskingPreRAID);
  insertPass(&SIOptimizeExecMaskingPreRAID, &SIFormMemoryClausesID);

  // This must be run immediately after phi elimination and before
  // TwoAddressInstructions, otherwise the processing of the tied operand of
  // SI_ELSE will introduce a copy of the tied operand source after the else.
  insertPass(&PHIEliminationID, &SILowerControlFlowID, false);

  // This must be run after SILowerControlFlow, since it needs to use the
  // machine-level CFG, but before register allocation.
  insertPass(&SILowerControlFlowID, &SIFixWWMLivenessID, false);

  TargetPassConfig::addOptimizedRegAlloc(RegAllocPass);
}

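// Post-register-allocation fixups: repair VGPR copies and, when optimizing,
// run the EXEC mask optimization.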
void GCNPassConfig::addPostRegAlloc() {
  addPass(&SIFixVGPRCopiesID);
  if (getOptLevel() > CodeGenOpt::None)
    addPass(&SIOptimizeExecMaskingID);
  TargetPassConfig::addPostRegAlloc();
}

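// Nothing extra is needed before the post-RA scheduler at the moment.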
void GCNPassConfig::addPreSched2() {
}

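// Passes that run just before final machine code emission.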
void GCNPassConfig::addPreEmitPass() {
  addPass(createSIMemoryLegalizerPass());
  addPass(createSIInsertWaitcntsPass());
  addPass(createSIShrinkInstructionsPass());
  addPass(createSIModeRegisterPass());

  // The hazard recognizer that runs as part of the post-RA scheduler does not
  // guarantee that it can handle all hazards correctly. This is because, if
  // there are multiple scheduling regions in a basic block, the regions are
  // scheduled bottom up, so when we begin to schedule a region we don't know
  // what instructions were emitted directly before it.
  //
  // Here we add a stand-alone hazard recognizer pass which can handle all
  // cases.
  //
  // FIXME: This stand-alone pass will emit individual S_NOP 0 instructions as
  // needed. It would be better for it to emit S_NOP <N> when possible.
  addPass(&PostRAHazardRecognizerID);

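  // SIInsertSkips and BranchRelaxation come last; branch relaxation in
  // particular needs the final block sizes to decide which branches to relax.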
  addPass(&SIInsertSkipsPassID);
  addPass(&BranchRelaxationPassID);
}

TargetPassConfig *GCNTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new GCNPassConfig(*this, PM);
}