//===-- AMDGPU.h - MachineFunction passes hw codegen --------------*- C++ -*-=//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPU_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPU_H

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/Target/TargetMachine.h"

namespace llvm {

class AMDGPUTargetMachine;
class FunctionPass;
class GCNTargetMachine;
class ModulePass;
class Pass;
class Target;
class TargetMachine;
class PassRegistry;
class Module;

// R600 Passes
FunctionPass *createR600VectorRegMerger();
FunctionPass *createR600ExpandSpecialInstrsPass();
FunctionPass *createR600EmitClauseMarkers();
FunctionPass *createR600ClauseMergePass();
FunctionPass *createR600Packetizer();
FunctionPass *createR600ControlFlowFinalizer();
FunctionPass *createAMDGPUCFGStructurizerPass();
FunctionPass *createR600ISelDag(TargetMachine *TM, CodeGenOpt::Level OptLevel);

// SI Passes
FunctionPass *createSIAnnotateControlFlowPass();
FunctionPass *createSIFoldOperandsPass();

// SI SDWA peephole: tries to combine several instructions into a single SDWA
// instruction. For example,
//
//   V_LSHRREV_B32_e32 %vreg0, 16, %vreg1
//   V_ADD_I32_e32 %vreg2, %vreg0, %vreg3
//   V_LSHLREV_B32_e32 %vreg4, 16, %vreg2
//
// becomes
//
//   V_ADD_I32_sdwa %vreg4, %vreg1, %vreg3
//       dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
//
// The pass works one basic block at a time: it matches instructions against
// "SDWA patterns" to form source/destination SDWA operands (e.g. the first
// shift above matches the source operand %vreg1 src_sel:WORD_1), collects the
// instructions that could consume those operands, checks whether each one can
// be converted, and finally rewrites the convertible ones as SDWA instructions
// (sketched below).
FunctionPass *createSIPeepholeSDWAPass();
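
// A minimal sketch of the block-local flow described above (an assumption, not
// the actual pass implementation); matchSDWAOperand, potentialUsers,
// canConvertToSDWA and convertToSDWA are hypothetical helpers standing in for
// the real pattern matchers.
//
//   for (MachineBasicBlock &MBB : MF) {
//     SmallVector<SDWAOperand, 8> Operands;
//     for (MachineInstr &MI : MBB)              // 1. Match SDWA patterns.
//       if (auto Op = matchSDWAOperand(MI))
//         Operands.push_back(*Op);
//     for (SDWAOperand &Op : Operands)          // 2. Collect potential users.
//       for (MachineInstr *UseMI : potentialUsers(Op))
//         if (canConvertToSDWA(*UseMI, Op))     // 3. Check convertibility.
//           convertToSDWA(*UseMI, Op);          // 4. Rewrite as SDWA.
//   }
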
FunctionPass *createSILowerI1CopiesPass();
FunctionPass *createSIShrinkInstructionsPass();
FunctionPass *createSILoadStoreOptimizerPass();
FunctionPass *createSIWholeQuadModePass();
FunctionPass *createSIFixControlFlowLiveIntervalsPass();
FunctionPass *createSIOptimizeExecMaskingPreRAPass();
FunctionPass *createSIFixSGPRCopiesPass();
FunctionPass *createSIMemoryLegalizerPass();
FunctionPass *createSIDebuggerInsertNopsPass();
FunctionPass *createSIInsertWaitsPass();
FunctionPass *createSIInsertWaitcntsPass();

// Whole Wavefront Mode (WWM) is similar to WQM, except that all lanes are
// enabled regardless of control flow. It is needed for wavefront reductions in
// non-uniform control flow, where the inactive lanes must be enabled so they
// can carry intermediate results. Shaders request WWM with the
// @llvm.amdgcn.wwm intrinsic (sketched below); the analysis and exec-mask
// rewriting happen in the whole-quad-mode pass, and WWM is kept entirely
// block-local. Because enabling inactive lanes constrains register allocation,
// SIFixWWMLiveness applies workarounds; see the comments at the top of
// SIFixWWMLiveness.cpp for details.
FunctionPass *createSIFixWWMLivenessPass();
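
// A minimal sketch (an assumption based on the description above, not code
// from this repository) of how a frontend might wrap a value in WWM; M,
// Builder and Val are assumed to be an existing llvm::Module, IRBuilder and
// llvm::Value.
//
//   llvm::Function *WWM = llvm::Intrinsic::getDeclaration(
//       &M, llvm::Intrinsic::amdgcn_wwm, {Val->getType()});
//   llvm::Value *WWMVal = Builder.CreateCall(WWM, {Val});
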
FunctionPass *createAMDGPUSimplifyLibCallsPass();
FunctionPass *createAMDGPUUseNativeCallsPass();
FunctionPass *createAMDGPUCodeGenPreparePass();
FunctionPass *createAMDGPUMachineCFGStructurizerPass();
FunctionPass *createAMDGPURewriteOutArgumentsPass();

void initializeAMDGPUDAGToDAGISelPass(PassRegistry&);

void initializeAMDGPUMachineCFGStructurizerPass(PassRegistry&);
extern char &AMDGPUMachineCFGStructurizerID;

void initializeAMDGPUAlwaysInlinePass(PassRegistry&);

Pass *createAMDGPUAnnotateKernelFeaturesPass();
void initializeAMDGPUAnnotateKernelFeaturesPass(PassRegistry &);
extern char &AMDGPUAnnotateKernelFeaturesID;

ModulePass *createAMDGPULowerIntrinsicsPass();
void initializeAMDGPULowerIntrinsicsPass(PassRegistry &);
extern char &AMDGPULowerIntrinsicsID;

void initializeAMDGPURewriteOutArgumentsPass(PassRegistry &);
extern char &AMDGPURewriteOutArgumentsID;

void initializeR600ClauseMergePassPass(PassRegistry &);
extern char &R600ClauseMergePassID;

void initializeR600ControlFlowFinalizerPass(PassRegistry &);
extern char &R600ControlFlowFinalizerID;

void initializeR600ExpandSpecialInstrsPassPass(PassRegistry &);
extern char &R600ExpandSpecialInstrsPassID;

void initializeR600VectorRegMergerPass(PassRegistry &);
extern char &R600VectorRegMergerID;

void initializeR600PacketizerPass(PassRegistry &);
extern char &R600PacketizerID;

void initializeSIFoldOperandsPass(PassRegistry &);
extern char &SIFoldOperandsID;

void initializeSIPeepholeSDWAPass(PassRegistry &);
extern char &SIPeepholeSDWAID;

void initializeSIShrinkInstructionsPass(PassRegistry&);
extern char &SIShrinkInstructionsID;

void initializeSIFixSGPRCopiesPass(PassRegistry &);
extern char &SIFixSGPRCopiesID;

void initializeSIFixVGPRCopiesPass(PassRegistry &);
extern char &SIFixVGPRCopiesID;

void initializeSILowerI1CopiesPass(PassRegistry &);
extern char &SILowerI1CopiesID;

void initializeSILoadStoreOptimizerPass(PassRegistry &);
extern char &SILoadStoreOptimizerID;

void initializeSIWholeQuadModePass(PassRegistry &);
extern char &SIWholeQuadModeID;

void initializeSILowerControlFlowPass(PassRegistry &);
extern char &SILowerControlFlowID;

void initializeSIInsertSkipsPass(PassRegistry &);
extern char &SIInsertSkipsPassID;

void initializeSIOptimizeExecMaskingPass(PassRegistry &);
extern char &SIOptimizeExecMaskingID;

void initializeSIFixWWMLivenessPass(PassRegistry &);
extern char &SIFixWWMLivenessID;

void initializeAMDGPUSimplifyLibCallsPass(PassRegistry &);
extern char &AMDGPUSimplifyLibCallsID;

void initializeAMDGPUUseNativeCallsPass(PassRegistry &);
extern char &AMDGPUUseNativeCallsID;

// Passes common to R600 and SI
FunctionPass *createAMDGPUPromoteAlloca();
void initializeAMDGPUPromoteAllocaPass(PassRegistry&);
extern char &AMDGPUPromoteAllocaID;

Pass *createAMDGPUStructurizeCFGPass();
FunctionPass *createAMDGPUISelDag(
    TargetMachine *TM = nullptr,
    CodeGenOpt::Level OptLevel = CodeGenOpt::Default);
ModulePass *createAMDGPUAlwaysInlinePass(bool GlobalOpt = true);
ModulePass *createAMDGPUOpenCLImageTypeLoweringPass();
FunctionPass *createAMDGPUAnnotateUniformValues();

ModulePass* createAMDGPUUnifyMetadataPass();
void initializeAMDGPUUnifyMetadataPass(PassRegistry&);
extern char &AMDGPUUnifyMetadataID;

void initializeSIOptimizeExecMaskingPreRAPass(PassRegistry&);
extern char &SIOptimizeExecMaskingPreRAID;

void initializeAMDGPUAnnotateUniformValuesPass(PassRegistry&);
extern char &AMDGPUAnnotateUniformValuesPassID;

void initializeAMDGPUCodeGenPreparePass(PassRegistry&);
extern char &AMDGPUCodeGenPrepareID;

void initializeSIAnnotateControlFlowPass(PassRegistry&);
extern char &SIAnnotateControlFlowPassID;

void initializeSIMemoryLegalizerPass(PassRegistry&);
extern char &SIMemoryLegalizerID;

void initializeSIDebuggerInsertNopsPass(PassRegistry&);
extern char &SIDebuggerInsertNopsID;

void initializeSIInsertWaitsPass(PassRegistry&);
extern char &SIInsertWaitsID;

void initializeSIInsertWaitcntsPass(PassRegistry&);
extern char &SIInsertWaitcntsID;

void initializeAMDGPUUnifyDivergentExitNodesPass(PassRegistry&);
extern char &AMDGPUUnifyDivergentExitNodesID;

ImmutablePass *createAMDGPUAAWrapperPass();
void initializeAMDGPUAAWrapperPassPass(PassRegistry&);

void initializeAMDGPUArgumentUsageInfoPass(PassRegistry &);

Pass *createAMDGPUFunctionInliningPass();
void initializeAMDGPUInlinerPass(PassRegistry&);

Target &getTheAMDGPUTarget();
Target &getTheGCNTarget();

namespace AMDGPU {
enum TargetIndex {
  TI_CONSTDATA_START,
  TI_SCRATCH_RSRC_DWORD0,
  TI_SCRATCH_RSRC_DWORD1,
  TI_SCRATCH_RSRC_DWORD2,
  TI_SCRATCH_RSRC_DWORD3
};
}

} // End namespace llvm

/// OpenCL uses address spaces to differentiate between the various memory
/// regions on the hardware. On the CPU all of the address spaces point to the
/// same memory; on the GPU, however, each address space points to a separate
/// piece of memory that is distinct from the other memory regions.
struct AMDGPUAS {
  // The following address space values depend on the triple environment.
  unsigned PRIVATE_ADDRESS; ///< Address space for private memory.
  unsigned FLAT_ADDRESS;    ///< Address space for flat memory.
  unsigned REGION_ADDRESS;  ///< Address space for region memory.

  // The maximum value for flat, generic, local, private, constant and region.
  const static unsigned MAX_COMMON_ADDRESS = 5;

  const static unsigned GLOBAL_ADDRESS = 1;   ///< Address space for global memory (RAT0, VTX0).
  const static unsigned CONSTANT_ADDRESS = 2; ///< Address space for constant memory (VTX2).
  const static unsigned LOCAL_ADDRESS = 3;    ///< Address space for local memory.
  const static unsigned PARAM_D_ADDRESS = 6;  ///< Address space for directly addressable parameter memory (CONST0).
  const static unsigned PARAM_I_ADDRESS = 7;  ///< Address space for indirectly addressable parameter memory (VTX1).

  // Do not re-order the CONSTANT_BUFFER_* enums. Several places depend on this
  // order to be able to dynamically index a constant buffer, for example:
  //
  //   ConstantBufferAS = CONSTANT_BUFFER_0 + CBIdx
  const static unsigned CONSTANT_BUFFER_0 = 8;
  const static unsigned CONSTANT_BUFFER_1 = 9;
  const static unsigned CONSTANT_BUFFER_2 = 10;
  const static unsigned CONSTANT_BUFFER_3 = 11;
  const static unsigned CONSTANT_BUFFER_4 = 12;
  const static unsigned CONSTANT_BUFFER_5 = 13;
  const static unsigned CONSTANT_BUFFER_6 = 14;
  const static unsigned CONSTANT_BUFFER_7 = 15;
  const static unsigned CONSTANT_BUFFER_8 = 16;
  const static unsigned CONSTANT_BUFFER_9 = 17;
  const static unsigned CONSTANT_BUFFER_10 = 18;
  const static unsigned CONSTANT_BUFFER_11 = 19;
  const static unsigned CONSTANT_BUFFER_12 = 20;
  const static unsigned CONSTANT_BUFFER_13 = 21;
  const static unsigned CONSTANT_BUFFER_14 = 22;
  const static unsigned CONSTANT_BUFFER_15 = 23;

  // Some places use this if the address space can't be determined.
  const static unsigned UNKNOWN_ADDRESS_SPACE = ~0u;
};

namespace llvm {
namespace AMDGPU {
AMDGPUAS getAMDGPUAS(const Module &M);
AMDGPUAS getAMDGPUAS(const TargetMachine &TM);
AMDGPUAS getAMDGPUAS(Triple T);
} // namespace AMDGPU
} // namespace llvm

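// A minimal usage sketch (an assumption, not part of the upstream header):
// query the triple-dependent address-space mapping and test a pointer's
// address space; isFlatPointer is a hypothetical helper, and the usual IR
// headers (e.g. llvm/IR/DerivedTypes.h) are assumed to be available.
//
//   static bool isFlatPointer(const llvm::Module &M,
//                             const llvm::PointerType *PtrTy) {
//     AMDGPUAS AS = llvm::AMDGPU::getAMDGPUAS(M); // mapping depends on triple
//     return PtrTy->getAddressSpace() == AS.FLAT_ADDRESS;
//   }
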
#endif