//===-- SIISelLowering.h - SI DAG Lowering Interface ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI DAG Lowering interface definition
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H

#include "AMDGPUISelLowering.h"
#include "AMDGPUArgumentUsageInfo.h"
#include "SIInstrInfo.h"

namespace llvm {

class SITargetLowering final : public AMDGPUTargetLowering {
|
AMDGPU: Separate R600 and GCN TableGen files
Summary:
We now have two sets of generated TableGen files, one for R600 and one
for GCN, so each sub-target now has its own tables of instructions,
registers, ISel patterns, etc. This should help reduce compile time
since each sub-target now only has to consider information that
is specific to itself. This will also help prevent the R600
sub-target from slowing down new features for GCN, like disassembler
support, GlobalISel, etc.
Reviewers: arsenm, nhaehnle, jvesely
Reviewed By: arsenm
Subscribers: MatzeB, kzhuravl, wdng, mgorny, yaxunl, dstuttard, tpr, t-tye, javed.absar, llvm-commits
Differential Revision: https://reviews.llvm.org/D46365
llvm-svn: 335942
2018-06-29 07:47:12 +08:00
|
|
|
private:
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget *Subtarget;
|
AMDGPU: Separate R600 and GCN TableGen files
Summary:
We now have two sets of generated TableGen files, one for R600 and one
for GCN, so each sub-target now has its own tables of instructions,
registers, ISel patterns, etc. This should help reduce compile time
since each sub-target now only has to consider information that
is specific to itself. This will also help prevent the R600
sub-target from slowing down new features for GCN, like disassembler
support, GlobalISel, etc.
Reviewers: arsenm, nhaehnle, jvesely
Reviewed By: arsenm
Subscribers: MatzeB, kzhuravl, wdng, mgorny, yaxunl, dstuttard, tpr, t-tye, javed.absar, llvm-commits
Differential Revision: https://reviews.llvm.org/D46365
llvm-svn: 335942
2018-06-29 07:47:12 +08:00
|
|
|
|
2017-04-12 06:29:24 +08:00
|
|
|
SDValue lowerKernArgParameterPtr(SelectionDAG &DAG, const SDLoc &SL,
|
|
|
|
SDValue Chain, uint64_t Offset) const;
|
2017-07-28 23:52:08 +08:00
|
|
|
SDValue getImplicitArgPtr(SelectionDAG &DAG, const SDLoc &SL) const;
|
2017-04-12 06:29:24 +08:00
|
|
|
SDValue lowerKernargMemParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
|
|
|
|
const SDLoc &SL, SDValue Chain,
|
2018-05-31 00:17:51 +08:00
|
|
|
uint64_t Offset, unsigned Align, bool Signed,
|
2017-04-12 06:29:24 +08:00
|
|
|
const ISD::InputArg *Arg = nullptr) const;
|
|
|
|
|
2017-05-18 05:56:25 +08:00
|
|
|
SDValue lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
|
|
|
|
const SDLoc &SL, SDValue Chain,
|
|
|
|
const ISD::InputArg &Arg) const;
|
2017-08-04 07:00:29 +08:00
|
|
|
SDValue getPreloadedValue(SelectionDAG &DAG,
|
|
|
|
const SIMachineFunctionInfo &MFI,
|
|
|
|
EVT VT,
|
|
|
|
AMDGPUFunctionArgInfo::PreloadedValue) const;
|
2017-05-18 05:56:25 +08:00
|
|
|
|
2016-06-15 04:29:59 +08:00
|
|
|
SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
|
|
|
|
SelectionDAG &DAG) const override;
|
2015-12-01 05:15:45 +08:00
|
|
|
SDValue lowerImplicitZextParam(SelectionDAG &DAG, SDValue Op,
|
|
|
|
MVT VT, unsigned Offset) const;
|
AMDGPU: Select MIMG instructions manually in SITargetLowering
Summary:
Having TableGen patterns for image intrinsics is hitting limitations:
for D16 we already have to manually pre-lower the packing of data
values, and we will have to do the same for A16 eventually.
Since there is already some custom C++ code anyway, it is arguably easier
to just do everything in C++, now that we can use the beefed-up generic
tables backend of TableGen to provide all the required metadata and map
intrinsics to corresponding opcodes. With this approach, all image
intrinsic lowering happens in SITargetLowering::lowerImage. That code is
dense due to all the cases that it handles, but it should still be easier
to follow than what we had before, by virtue of it all being done in a
single location, and by virtue of not relying on the TableGen pattern
magic that very few people really understand.
This means that we will have MachineSDNodes with MIMG instructions
during DAG combining, but that seems alright: previously we had
intrinsic nodes instead, but those are similarly opaque to the generic
CodeGen infrastructure, and the final pattern matching just did a 1:1
translation to machine instructions anyway. If anything, the fact that
we now merge the address words into a vector before DAG combine should
be an advantage.
Change-Id: I417f26bd88f54ce9781c1668acc01f3f99774de6
Reviewers: arsenm, rampitec, rtaylor, tstellar
Subscribers: kzhuravl, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits
Differential Revision: https://reviews.llvm.org/D48017
llvm-svn: 335228
2018-06-21 21:36:57 +08:00
|
|
|
SDValue lowerImage(SDValue Op, const AMDGPU::ImageDimIntrinsicInfo *Intr,
|
|
|
|
SelectionDAG &DAG) const;
|
2015-12-01 05:15:45 +08:00
|
|
|
|
2014-07-26 14:23:37 +08:00
|
|
|
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
|
2016-04-12 22:05:04 +08:00
|
|
|
SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
|
2014-07-26 14:23:37 +08:00
|
|
|
SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
|
AMDGPU: Try a lot harder to emit scalar loads
This has two main components. First, widen
widen short constant loads in DAG when they have
the correct alignment. This is already done a bit in
AMDGPUCodeGenPrepare, since that has access to
DivergenceAnalysis. This can't help kernarg loads
created in the DAG. Start to use DAG divergence analysis
to help this case.
The second part is to avoid kernel argument lowering
breaking the alignment of short vector elements because
calling convention lowering wants to split everything
into legal register types.
When loading a split type, load the nearest 4-byte aligned
segment and shift to get the desired bits. This extra
load of the earlier argument piece ends up merging,
and the bit extract hopefully folds out.
There are a number of improvements and regressions with
this, but I think as-is this is a better compromise between
several of the worst parts of SelectionDAG.
Particularly when i16 is legal, this produces worse code
for i8 and i16 element vector kernel arguments. This is
partially due to the very weak load merging the DAG does.
It only looks for fairly specific combines between pairs
of loads which no longer appear. In particular this
causes v4i16 loads to be split into 2 components when
previously the two halves were merged.
Worse, because of the newly introduced shifts, there
is a lot more unnecessary vector packing and unpacking code
emitted. At least some of this is due to reporting
false for isTypeDesirableForOp for i16 as a workaround for
the lack of divergence information in the DAG. The cases
where this happens it doesn't actually matter, but the
relevant code in SimplifyDemandedBits doens't have the context
to know to ignore this.
The use of the scalar cache is probably more important
than the mess of mostly scalar instructions doing this packing
and unpacking. Future work can fix this, possibly by making better
use of the new DAG divergence information for controlling promotion
decisions, or adding another version of shift + trunc + shift
combines that doesn't only know about the used types.
llvm-svn: 334180
2018-06-07 17:54:49 +08:00
|
|
|
|
|
|
|
SDValue widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const;
|
2013-11-14 07:36:50 +08:00
|
|
|
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
|
2014-02-05 01:18:40 +08:00
|
|
|
SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
|
2016-07-20 07:16:53 +08:00
|
|
|
SDValue lowerFastUnsafeFDIV(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const;
|
2016-12-22 11:05:41 +08:00
|
|
|
SDValue LowerFDIV16(SDValue Op, SelectionDAG &DAG) const;
|
2014-07-16 04:18:31 +08:00
|
|
|
SDValue LowerFDIV32(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerFDIV64(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const;
|
2014-10-04 07:54:41 +08:00
|
|
|
SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool Signed) const;
|
2013-11-14 07:36:50 +08:00
|
|
|
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
|
2014-07-20 02:44:39 +08:00
|
|
|
SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
|
AMDGPU: Implement {BUFFER,FLAT}_ATOMIC_CMPSWAP{,_X2}
Summary:
Implement BUFFER_ATOMIC_CMPSWAP{,_X2} instructions on all GCN targets, and FLAT_ATOMIC_CMPSWAP{,_X2} on CI+.
32-bit instruction variants tested manually on Kabini and Bonaire. Tests and parts of code provided by Jan Veselý.
Patch by: Vedran Miletić
Reviewers: arsenm, tstellarAMD, nhaehnle
Subscribers: jvesely, scchan, kanarayan, arsenm
Differential Revision: http://reviews.llvm.org/D17280
llvm-svn: 265170
2016-04-02 02:27:37 +08:00
|
|
|
SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
|
2012-12-20 06:10:31 +08:00
|
|
|
SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
|
2012-12-12 05:25:42 +08:00
|
|
|
|
2018-05-22 14:32:10 +08:00
|
|
|
SDValue adjustLoadValueType(unsigned Opcode, MemSDNode *M,
|
|
|
|
SelectionDAG &DAG,
|
|
|
|
bool IsIntrinsic = false) const;
|
|
|
|
|
2018-01-13 05:12:19 +08:00
|
|
|
SDValue handleD16VData(SDValue VData, SelectionDAG &DAG) const;
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Converts \p Op, which must be of floating point type, to the
|
2016-11-13 15:01:11 +08:00
|
|
|
/// floating point type \p VT, by either extending or truncating it.
|
|
|
|
SDValue getFPExtOrFPTrunc(SelectionDAG &DAG,
|
|
|
|
SDValue Op,
|
|
|
|
const SDLoc &DL,
|
|
|
|
EVT VT) const;
|
|
|
|
|
2017-04-12 06:29:24 +08:00
|
|
|
SDValue convertArgType(
|
|
|
|
SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Val,
|
|
|
|
bool Signed, const ISD::InputArg *Arg = nullptr) const;
|
|
|
|
|
2018-05-01 23:54:18 +08:00
|
|
|
/// Custom lowering for ISD::FP_ROUND for MVT::f16.
|
2016-11-17 12:28:37 +08:00
|
|
|
SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
|
2017-04-07 07:02:33 +08:00
|
|
|
SDValue getSegmentAperture(unsigned AS, const SDLoc &DL,
|
|
|
|
SelectionDAG &DAG) const;
|
|
|
|
|
2016-04-26 03:27:24 +08:00
|
|
|
SDValue lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const;
|
2017-01-24 07:09:58 +08:00
|
|
|
SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
|
2018-05-16 19:47:30 +08:00
|
|
|
SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
|
2016-06-18 06:27:03 +08:00
|
|
|
SDValue lowerTRAP(SDValue Op, SelectionDAG &DAG) const;
|
2018-05-17 00:19:34 +08:00
|
|
|
SDValue lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const;
|
2016-04-26 03:27:24 +08:00
|
|
|
|
2017-12-05 06:18:27 +08:00
|
|
|
SDNode *adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;
|
2013-04-10 16:39:08 +08:00
|
|
|
|
2015-01-14 09:35:22 +08:00
|
|
|
SDValue performUCharToFloatCombine(SDNode *N,
|
|
|
|
DAGCombinerInfo &DCI) const;
|
2014-08-16 01:49:05 +08:00
|
|
|
SDValue performSHLPtrCombine(SDNode *N,
|
|
|
|
unsigned AS,
|
2017-11-13 13:11:54 +08:00
|
|
|
EVT MemVT,
|
2014-08-16 01:49:05 +08:00
|
|
|
DAGCombinerInfo &DCI) const;
|
2016-09-14 23:19:03 +08:00
|
|
|
|
2016-12-22 11:44:42 +08:00
|
|
|
SDValue performMemSDNodeCombine(MemSDNode *N, DAGCombinerInfo &DCI) const;
|
|
|
|
|
2016-09-14 23:19:03 +08:00
|
|
|
SDValue splitBinaryBitConstantOp(DAGCombinerInfo &DCI, const SDLoc &SL,
|
|
|
|
unsigned Opc, SDValue LHS,
|
|
|
|
const ConstantSDNode *CRHS) const;
|
|
|
|
|
2015-01-07 07:00:46 +08:00
|
|
|
SDValue performAndCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2015-01-07 07:00:39 +08:00
|
|
|
SDValue performOrCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2016-09-14 23:19:03 +08:00
|
|
|
SDValue performXorCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2017-04-01 03:53:03 +08:00
|
|
|
SDValue performZeroExtendCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2015-01-07 07:00:39 +08:00
|
|
|
SDValue performClassCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2016-04-14 09:42:16 +08:00
|
|
|
SDValue performFCanonicalizeCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2014-06-12 01:50:44 +08:00
|
|
|
|
2017-02-22 07:35:48 +08:00
|
|
|
SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
|
|
|
|
SDValue Op0, SDValue Op1) const;
|
2017-02-28 06:40:39 +08:00
|
|
|
SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
|
|
|
|
SDValue Op0, SDValue Op1, bool Signed) const;
|
2016-01-29 04:53:42 +08:00
|
|
|
SDValue performMinMaxCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2017-02-22 07:35:48 +08:00
|
|
|
SDValue performFMed3Combine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2017-02-22 08:27:34 +08:00
|
|
|
SDValue performCvtPkRTZCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2017-05-12 01:26:25 +08:00
|
|
|
SDValue performExtractVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2017-09-21 05:01:24 +08:00
|
|
|
SDValue performBuildVectorCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2016-01-29 04:53:42 +08:00
|
|
|
|
2016-12-22 12:03:35 +08:00
|
|
|
unsigned getFusedOpcode(const SelectionDAG &DAG,
|
|
|
|
const SDNode *N0, const SDNode *N1) const;
|
2017-06-22 06:05:06 +08:00
|
|
|
SDValue performAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2017-06-22 06:30:01 +08:00
|
|
|
SDValue performAddCarrySubCarryCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
|
|
|
SDValue performSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2016-12-22 11:44:42 +08:00
|
|
|
SDValue performFAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
|
|
|
SDValue performFSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2015-01-07 07:00:41 +08:00
|
|
|
SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2016-12-22 11:44:42 +08:00
|
|
|
SDValue performCvtF32UByteNCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2018-05-24 13:28:34 +08:00
|
|
|
SDValue performClampCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2018-06-27 23:33:33 +08:00
|
|
|
SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;
|
2014-11-15 04:08:52 +08:00
|
|
|
|
2015-07-20 22:28:41 +08:00
|
|
|
bool isLegalFlatAddressingMode(const AddrMode &AM) const;
|
2017-07-29 09:12:31 +08:00
|
|
|
bool isLegalGlobalAddressingMode(const AddrMode &AM) const;
|
2015-08-08 04:18:34 +08:00
|
|
|
bool isLegalMUBUFAddressingMode(const AddrMode &AM) const;
|
2016-02-13 07:45:29 +08:00
|
|
|
|
2017-03-18 04:41:45 +08:00
|
|
|
unsigned isCFIntrinsic(const SDNode *Intr) const;
|
2016-06-25 11:11:28 +08:00
|
|
|
|
|
|
|
void createDebuggerPrologueStackObjects(MachineFunction &MF) const;
|
2016-10-21 02:12:38 +08:00
|
|
|
|
|
|
|
/// \returns True if fixup needs to be emitted for given global value \p GV,
|
|
|
|
/// false otherwise.
|
|
|
|
bool shouldEmitFixup(const GlobalValue *GV) const;
|
|
|
|
|
|
|
|
/// \returns True if GOT relocation needs to be emitted for given global value
|
|
|
|
/// \p GV, false otherwise.
|
|
|
|
bool shouldEmitGOTReloc(const GlobalValue *GV) const;
|
|
|
|
|
|
|
|
/// \returns True if PC-relative relocation needs to be emitted for given
|
|
|
|
/// global value \p GV, false otherwise.
|
|
|
|
bool shouldEmitPCReloc(const GlobalValue *GV) const;
|
|
|
|
|
2012-12-12 05:25:42 +08:00
|
|
|
public:
|
2018-07-12 04:59:01 +08:00
|
|
|
SITargetLowering(const TargetMachine &tm, const GCNSubtarget &STI);
|
2016-06-24 14:30:11 +08:00
|
|
|
|
2018-07-12 04:59:01 +08:00
|
|
|
const GCNSubtarget *getSubtarget() const;
|
2014-08-16 01:17:07 +08:00
|
|
|
|
2018-05-23 03:37:55 +08:00
|
|
|
bool isFPExtFoldable(unsigned Opcode, EVT DestVT, EVT SrcVT) const override;
|
|
|
|
|
2017-07-26 16:06:58 +08:00
|
|
|
bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const override;
|
2017-03-16 07:15:12 +08:00
|
|
|
|
2016-04-12 22:05:04 +08:00
|
|
|
bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
|
2017-12-15 06:34:10 +08:00
|
|
|
MachineFunction &MF,
|
2016-04-12 22:05:04 +08:00
|
|
|
unsigned IntrinsicID) const override;
|
|
|
|
|
2017-03-16 07:15:12 +08:00
|
|
|
bool getAddrModeArguments(IntrinsicInst * /*I*/,
|
|
|
|
SmallVectorImpl<Value*> &/*Ops*/,
|
|
|
|
Type *&/*AccessTy*/) const override;
|
2014-10-22 00:25:08 +08:00
|
|
|
|
2015-07-09 10:09:40 +08:00
|
|
|
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
|
2017-07-21 19:59:37 +08:00
|
|
|
unsigned AS,
|
|
|
|
Instruction *I = nullptr) const override;
|
2014-08-16 01:17:07 +08:00
|
|
|
|
2017-07-11 04:25:54 +08:00
|
|
|
bool canMergeStoresTo(unsigned AS, EVT MemVT,
|
|
|
|
const SelectionDAG &DAG) const override;
|
2017-05-24 23:59:09 +08:00
|
|
|
|
2014-07-28 01:46:40 +08:00
|
|
|
bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS,
|
|
|
|
unsigned Align,
|
|
|
|
bool *IsFast) const override;
|
2014-07-03 08:23:43 +08:00
|
|
|
|
2014-07-29 01:49:26 +08:00
|
|
|
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
|
|
|
|
unsigned SrcAlign, bool IsMemset,
|
|
|
|
bool ZeroMemset,
|
|
|
|
bool MemcpyStrSrc,
|
|
|
|
MachineFunction &MF) const override;
|
|
|
|
|
2015-12-16 04:55:55 +08:00
|
|
|
bool isMemOpUniform(const SDNode *N) const;
|
2016-12-09 01:28:47 +08:00
|
|
|
bool isMemOpHasNoClobberedMemOperand(const SDNode *N) const;
|
2015-12-02 07:04:00 +08:00
|
|
|
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
|
2016-12-03 02:12:53 +08:00
|
|
|
bool isCheapAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
|
2015-12-02 07:04:00 +08:00
|
|
|
|
2014-07-03 08:23:43 +08:00
|
|
|
TargetLoweringBase::LegalizeTypeAction
|
|
|
|
getPreferredVectorAction(EVT VT) const override;
|
2013-03-07 17:03:52 +08:00
|
|
|
|
2014-04-29 15:57:24 +08:00
|
|
|
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
|
|
|
|
Type *Ty) const override;
|
2014-04-01 03:54:27 +08:00
|
|
|
|
2016-01-20 08:13:22 +08:00
|
|
|
bool isTypeDesirableForOp(unsigned Op, EVT VT) const override;
|
|
|
|
|
2016-06-25 09:59:16 +08:00
|
|
|
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
|
|
|
|
|
2017-08-02 03:54:18 +08:00
|
|
|
bool supportSplitCSR(MachineFunction *MF) const override;
|
|
|
|
void initializeSplitCSR(MachineBasicBlock *Entry) const override;
|
|
|
|
void insertCopiesSplitCSR(
|
|
|
|
MachineBasicBlock *Entry,
|
|
|
|
const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;
|
|
|
|
|
2013-03-07 17:03:52 +08:00
|
|
|
SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
|
|
|
|
bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
2016-06-12 23:39:02 +08:00
|
|
|
const SDLoc &DL, SelectionDAG &DAG,
|
2014-04-29 15:57:24 +08:00
|
|
|
SmallVectorImpl<SDValue> &InVals) const override;
|
2013-03-07 17:03:52 +08:00
|
|
|
|
2017-05-18 05:56:25 +08:00
|
|
|
bool CanLowerReturn(CallingConv::ID CallConv,
|
|
|
|
MachineFunction &MF, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
LLVMContext &Context) const override;
|
|
|
|
|
|
|
|
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
|
2016-01-14 01:23:04 +08:00
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
2016-06-12 23:39:02 +08:00
|
|
|
const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
|
|
|
|
SelectionDAG &DAG) const override;
|
2016-01-14 01:23:04 +08:00
|
|
|
|
2017-08-04 07:00:29 +08:00
|
|
|
void passSpecialInputs(
|
|
|
|
CallLoweringInfo &CLI,
|
|
|
|
const SIMachineFunctionInfo &Info,
|
|
|
|
SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
|
|
|
|
SmallVectorImpl<SDValue> &MemOpChains,
|
|
|
|
SDValue Chain,
|
|
|
|
SDValue StackPtr) const;
|
|
|
|
|
2017-08-02 03:54:18 +08:00
|
|
|
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
|
|
|
|
CallingConv::ID CallConv, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins,
|
|
|
|
const SDLoc &DL, SelectionDAG &DAG,
|
|
|
|
SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
|
|
|
|
SDValue ThisVal) const;
|
2017-08-12 04:42:08 +08:00
|
|
|
|
|
|
|
bool mayBeEmittedAsTailCall(const CallInst *) const override;
|
|
|
|
|
|
|
|
bool isEligibleForTailCallOptimization(
|
|
|
|
SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
|
|
|
|
const SmallVectorImpl<ISD::OutputArg> &Outs,
|
|
|
|
const SmallVectorImpl<SDValue> &OutVals,
|
|
|
|
const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;
|
|
|
|
|
2017-08-02 03:54:18 +08:00
|
|
|
SDValue LowerCall(CallLoweringInfo &CLI,
|
|
|
|
SmallVectorImpl<SDValue> &InVals) const override;
|
|
|
|
|
2016-01-26 12:29:24 +08:00
|
|
|
unsigned getRegisterByName(const char* RegName, EVT VT,
|
|
|
|
SelectionDAG &DAG) const override;
|
|
|
|
|
2016-07-13 05:41:32 +08:00
|
|
|
MachineBasicBlock *splitKillBlock(MachineInstr &MI,
|
|
|
|
MachineBasicBlock *BB) const;
|
|
|
|
|
2016-07-01 06:52:52 +08:00
|
|
|
MachineBasicBlock *
|
|
|
|
EmitInstrWithCustomInserter(MachineInstr &MI,
|
|
|
|
MachineBasicBlock *BB) const override;
|
2017-10-14 05:10:22 +08:00
|
|
|
|
|
|
|
bool hasBitPreservingFPLogic(EVT VT) const override;
|
2015-01-30 03:34:32 +08:00
|
|
|
bool enableAggressiveFMAFusion(EVT VT) const override;
|
2015-07-09 10:09:04 +08:00
|
|
|
EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
|
|
|
|
EVT VT) const override;
|
2015-07-09 23:12:23 +08:00
|
|
|
MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override;
|
2014-04-29 15:57:24 +08:00
|
|
|
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
|
2018-06-15 23:15:46 +08:00
|
|
|
SDValue splitUnaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
|
|
|
|
SDValue splitBinaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
|
2014-04-29 15:57:24 +08:00
|
|
|
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
|
2018-06-15 23:15:46 +08:00
|
|
|
|
2017-01-24 07:09:58 +08:00
|
|
|
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
|
|
|
|
SelectionDAG &DAG) const override;
|
|
|
|
|
2014-04-29 15:57:24 +08:00
|
|
|
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
|
|
|
|
SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
|
2016-07-01 06:52:52 +08:00
|
|
|
void AdjustInstrPostInstrSelection(MachineInstr &MI,
|
2014-04-29 15:57:24 +08:00
|
|
|
SDNode *Node) const override;
|
2013-02-27 01:52:23 +08:00
|
|
|
|
2017-04-13 05:58:23 +08:00
|
|
|
SDNode *legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;
|
2014-11-06 03:01:17 +08:00
|
|
|
|
2016-06-12 23:39:02 +08:00
|
|
|
MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, const SDLoc &DL,
|
|
|
|
SDValue Ptr) const;
|
|
|
|
MachineSDNode *buildRSRC(SelectionDAG &DAG, const SDLoc &DL, SDValue Ptr,
|
|
|
|
uint32_t RsrcDword1, uint64_t RsrcDword2And3) const;
|
2015-07-06 03:29:18 +08:00
|
|
|
std::pair<unsigned, const TargetRegisterClass *>
|
|
|
|
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
|
|
|
|
StringRef Constraint, MVT VT) const override;
|
2015-12-10 10:12:53 +08:00
|
|
|
ConstraintType getConstraintType(StringRef Constraint) const override;
|
2016-06-12 23:39:02 +08:00
|
|
|
SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, const SDLoc &DL,
|
|
|
|
SDValue V) const;
|
2017-07-19 00:44:56 +08:00
|
|
|
|
|
|
|
void finalizeLowering(MachineFunction &MF) const override;
|
2017-11-15 08:45:43 +08:00
|
|
|
|
|
|
|
void computeKnownBitsForFrameIndex(const SDValue Op,
|
|
|
|
KnownBits &Known,
|
|
|
|
const APInt &DemandedElts,
|
|
|
|
const SelectionDAG &DAG,
|
|
|
|
unsigned Depth = 0) const override;
|
AMDGPU: Move isSDNodeSourceOfDivergence() implementation to SITargetLowering
Summary:
The code that handles ISD:Register and ISD::CopyFromReg assumes
the target is amdgcn, so this is broken on r600. We don't
need this analysis on r600 anyway so we can safely move
it to SITargetLowering.
Reviewers: alex-t, arsenm, nhaehnle
Reviewed By: arsenm
Subscribers: msearles, kzhuravl, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits
Differential Revision: https://reviews.llvm.org/D46298
llvm-svn: 334607
2018-06-13 23:06:37 +08:00
|
|
|
|
|
|
|
bool isSDNodeSourceOfDivergence(const SDNode *N,
|
|
|
|
FunctionLoweringInfo *FLI, DivergenceAnalysis *DA) const override;
|
2012-12-12 05:25:42 +08:00
|
|
|
};

} // End namespace llvm

#endif