llvm-project/llvm/lib/Target/XCore/XCoreISelLowering.h

//===-- XCoreISelLowering.h - XCore DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that XCore uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_XCORE_XCOREISELLOWERING_H
#define LLVM_LIB_TARGET_XCORE_XCOREISELLOWERING_H
#include "XCore.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
namespace llvm {
// Forward declarations
class XCoreSubtarget;
class XCoreTargetMachine;
namespace XCoreISD {
enum NodeType : unsigned {
// Start the numbering where the builtin ops and target ops leave off.
FIRST_NUMBER = ISD::BUILTIN_OP_END,
// Branch and link (call)
BL,
// pc relative address
PCRelativeWrapper,
// dp relative address
DPRelativeWrapper,
// cp relative address
CPRelativeWrapper,
// Load word from stack
LDWSP,
// Store word to stack
STWSP,
// Corresponds to retsp instruction
RETSP,
// Corresponds to LADD instruction
LADD,
// Corresponds to LSUB instruction
LSUB,
// Corresponds to LMUL instruction
LMUL,
// Corresponds to MACCU instruction
MACCU,
// Corresponds to MACCS instruction
MACCS,
// Corresponds to CRC8 instruction
CRC8,
// Jumptable branch.
BR_JT,
// Jumptable branch using long branches for each entry.
BR_JT32,
// Offset from frame pointer to the first (possible) on-stack argument
FRAME_TO_ARGS_OFFSET,
// Exception handler return. The stack is restored using the first
// argument, followed by a jump to the second argument.
EH_RETURN,
// Memory barrier.
MEMBARRIER
};
}
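// For illustration only: a two-result node such as LADD is typically
// created during custom lowering along these lines (operand names here are
// illustrative, not part of this interface):
//
//   SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32);
//   SDValue Carry = DAG.getConstant(0, dl, MVT::i32);
//   SDValue LAdd = DAG.getNode(XCoreISD::LADD, dl, VTs, LHS, RHS, Carry);
//
// The authoritative operand and result types are those used by the lowering
// code in XCoreISelLowering.cpp and the patterns in XCoreInstrInfo.td.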
//===--------------------------------------------------------------------===//
// TargetLowering Implementation
//===--------------------------------------------------------------------===//
class XCoreTargetLowering : public TargetLowering
{
public:
explicit XCoreTargetLowering(const TargetMachine &TM,
const XCoreSubtarget &Subtarget);
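// Target queries used during DAG construction and lowering: whether a
// zero-extension of Val to VT2 is free on XCore, which jump table encoding
// to use, and the scalar shift-amount type (i32 on this 32-bit target).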
using TargetLowering::isZExtFree;
bool isZExtFree(SDValue Val, EVT VT2) const override;
unsigned getJumpTableEncoding() const override;
MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override {
return MVT::i32;
}
/// LowerOperation - Provide custom lowering hooks for some operations.
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const override;
/// getTargetNodeName - This method returns the name of a target specific
/// DAG node.
const char *getTargetNodeName(unsigned Opcode) const override;
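// Expands pseudo instructions marked with usesCustomInserter (on XCore,
// notably the SELECT_CC pseudo), creating any new basic blocks they need.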
MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr &MI,
MachineBasicBlock *MBB) const override;
bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
Type *Ty, unsigned AS,
Instruction *I = nullptr) const override;
/// If a physical register, this returns the register that receives the
/// exception address on entry to an EH pad.
unsigned
getExceptionPointerRegister(const Constant *PersonalityFn) const override {
return XCore::R0;
}
/// If a physical register, this returns the register that receives the
/// exception typeid on entry to a landing pad.
unsigned
getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
return XCore::R1;
}
private:
const TargetMachine &TM;
const XCoreSubtarget &Subtarget;
// Lower Operand helpers
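// LowerCCCArguments and LowerCCCCallTo implement the plain C calling
// convention used on XCore; the register/stack locations they assign come
// from the calling-convention definitions in XCoreCallingConv.td.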
SDValue LowerCCCArguments(SDValue Chain, CallingConv::ID CallConv,
bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue LowerCCCCallTo(SDValue Chain, SDValue Callee,
CallingConv::ID CallConv, bool isVarArg,
bool isTailCall,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
SDValue getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
SelectionDAG &DAG) const;
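// Helper used by LowerLOAD: assembles a misaligned word load from two
// aligned word loads plus shifts when the base address is known to be
// word aligned.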
SDValue lowerLoadWordFromAlignedBasePlusOffset(const SDLoc &DL,
SDValue Chain, SDValue Base,
int64_t Offset,
SelectionDAG &DAG) const;
// Lower Operand specifics
SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const;
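// Returns extra MachineMemOperand flags for the memory access performed by
// I; this keeps atomic loads and stores marked MOVolatile so the backend
// continues to handle them conservatively.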
MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;
// Inline asm support
std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
// Expand specifics
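// ExpandADDSUB splits an i64 ADD/SUB into 32-bit halves chained through the
// LADD/LSUB carry; TryExpandADDWithMul tries to fold an i64 add of a
// multiply into the LMUL/MACCU/MACCS multiply-accumulate nodes instead.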
SDValue TryExpandADDWithMul(SDNode *Op, SelectionDAG &DAG) const;
SDValue ExpandADDSUB(SDNode *Op, SelectionDAG &DAG) const;
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
void computeKnownBitsForTargetNode(const SDValue Op,
KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
SDValue
LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
const SDLoc &dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue
LowerCall(TargetLowering::CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
const SDLoc &dl, SelectionDAG &DAG) const override;
bool
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
LLVMContext &Context) const override;
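// Have AtomicExpand bracket atomic loads and stores with explicit fences;
// the resulting fence nodes are lowered via LowerATOMIC_FENCE above.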
bool shouldInsertFencesForAtomic(const Instruction *I) const override {
return true;
}
};
}
#endif