[C++11] Add 'override' keyword to virtual methods that override a method from their base class.

llvm-svn: 203378
Craig Topper 2014-03-09 07:44:38 +00:00
parent 970714bcf9
commit 2d9361e325
21 changed files with 314 additions and 310 deletions
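
For readers unfamiliar with the C++11 feature this commit applies: the 'override' specifier tells the compiler that a member function is meant to override a virtual function inherited from a base class, so the redundant leading 'virtual' can be dropped and any signature mismatch becomes a compile-time error instead of silently declaring a new virtual. A minimal sketch of the pattern repeated throughout the patch (the Printer/X86Printer classes below are illustrative only, not taken from the LLVM sources):

struct Printer {
  virtual void emit(int Value) const;
  virtual ~Printer();
};

struct X86Printer : Printer {
  // Before this commit the declaration restated 'virtual' and the compiler
  // verified nothing:
  //   virtual void emit(int Value) const;

  // After this commit 'virtual' is dropped and 'override' is appended; the
  // compiler now checks that a matching Printer::emit actually exists.
  void emit(int Value) const override;

  // A mismatched signature would be rejected ("marked 'override' but does
  // not override"), e.g.:
  //   void emit(unsigned Value) const override;
};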

View File

@@ -26,8 +26,8 @@ public:
const MCRegisterInfo &MRI)
: MCInstPrinter(MAI, MII, MRI) {}
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
virtual void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot);
void printRegName(raw_ostream &OS, unsigned RegNo) const override;
void printInst(const MCInst *MI, raw_ostream &OS, StringRef Annot) override;
// Autogenerated by tblgen, returns true if we successfully printed an
// alias.

View File

@@ -56,9 +56,9 @@ namespace {
MCE(mce), PICBaseOffset(0), Is64BitMode(false),
IsPIC(TM.getRelocationModel() == Reloc::PIC_) {}
bool runOnMachineFunction(MachineFunction &MF);
bool runOnMachineFunction(MachineFunction &MF) override;
virtual const char *getPassName() const {
const char *getPassName() const override {
return "X86 Machine Code Emitter";
}
@@ -76,7 +76,7 @@ namespace {
void emitInstruction(MachineInstr &MI, const MCInstrDesc *Desc);
void getAnalysisUsage(AnalysisUsage &AU) const {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
AU.addRequired<MachineModuleInfo>();
MachineFunctionPass::getAnalysisUsage(AU);

View File

@@ -62,16 +62,16 @@ public:
X86ScalarSSEf32 = Subtarget->hasSSE1();
}
virtual bool TargetSelectInstruction(const Instruction *I);
bool TargetSelectInstruction(const Instruction *I) override;
/// \brief The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// possible.
virtual bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
const LoadInst *LI);
bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
const LoadInst *LI) override;
virtual bool FastLowerArguments();
bool FastLowerArguments() override;
#include "X86GenFastISel.inc"
@@ -128,11 +128,11 @@ private:
bool handleConstantAddresses(const Value *V, X86AddressMode &AM);
unsigned TargetMaterializeConstant(const Constant *C);
unsigned TargetMaterializeConstant(const Constant *C) override;
unsigned TargetMaterializeAlloca(const AllocaInst *C);
unsigned TargetMaterializeAlloca(const AllocaInst *C) override;
unsigned TargetMaterializeFloatZero(const ConstantFP *CF);
unsigned TargetMaterializeFloatZero(const ConstantFP *CF) override;
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.

View File

@@ -39,7 +39,7 @@ namespace {
/// where appropriate.
bool processBasicBlock(MachineFunction &MF, MachineFunction::iterator MFI);
virtual const char *getPassName() const { return "X86 Atom LEA Fixup";}
const char *getPassName() const override { return "X86 Atom LEA Fixup";}
/// \brief Given a machine register, look for the instruction
/// which writes it in the current basic block. If found,
@@ -80,7 +80,7 @@ namespace {
/// \brief Loop over all of the basic blocks,
/// replacing instructions by equivalent LEA instructions
/// if needed and when possible.
virtual bool runOnMachineFunction(MachineFunction &MF);
bool runOnMachineFunction(MachineFunction &MF) override;
private:
MachineFunction *MF;

View File

@@ -59,7 +59,7 @@ namespace {
memset(RegMap, 0, sizeof(RegMap));
}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<EdgeBundles>();
AU.addPreservedID(MachineLoopInfoID);
@@ -67,9 +67,9 @@ namespace {
MachineFunctionPass::getAnalysisUsage(AU);
}
virtual bool runOnMachineFunction(MachineFunction &MF);
bool runOnMachineFunction(MachineFunction &MF) override;
virtual const char *getPassName() const { return "X86 FP Stackifier"; }
const char *getPassName() const override { return "X86 FP Stackifier"; }
private:
const TargetInstrInfo *TII; // Machine instruction info.

View File

@@ -40,36 +40,36 @@ public:
/// emitProlog/emitEpilog - These methods insert prolog and epilog code into
/// the function.
void emitPrologue(MachineFunction &MF) const;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
void emitPrologue(MachineFunction &MF) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
void adjustForSegmentedStacks(MachineFunction &MF) const;
void adjustForSegmentedStacks(MachineFunction &MF) const override;
void adjustForHiPEPrologue(MachineFunction &MF) const;
void adjustForHiPEPrologue(MachineFunction &MF) const override;
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS = NULL) const;
RegScavenger *RS = NULL) const override;
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const;
const TargetRegisterInfo *TRI) const override;
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const;
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
const TargetRegisterInfo *TRI) const override;
bool hasFP(const MachineFunction &MF) const;
bool hasReservedCallFrame(const MachineFunction &MF) const;
bool hasFP(const MachineFunction &MF) const override;
bool hasReservedCallFrame(const MachineFunction &MF) const override;
int getFrameIndexOffset(const MachineFunction &MF, int FI) const;
int getFrameIndexOffset(const MachineFunction &MF, int FI) const override;
int getFrameIndexReference(const MachineFunction &MF, int FI,
unsigned &FrameReg) const;
unsigned &FrameReg) const override;
void eliminateCallFramePseudoInstr(MachineFunction &MF,
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const;
MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI) const override;
};
} // End llvm namespace

View File

@@ -156,15 +156,15 @@ namespace {
Subtarget(&tm.getSubtarget<X86Subtarget>()),
OptForSize(false) {}
virtual const char *getPassName() const {
const char *getPassName() const override {
return "X86 DAG->DAG Instruction Selection";
}
virtual void EmitFunctionEntryCode();
void EmitFunctionEntryCode() override;
virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;
bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
virtual void PreprocessISelDAG();
void PreprocessISelDAG() override;
inline bool immSext8(SDNode *N) const {
return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
@@ -181,7 +181,7 @@ namespace {
#include "X86GenDAGISel.inc"
private:
SDNode *Select(SDNode *N);
SDNode *Select(SDNode *N) override;
SDNode *SelectGather(SDNode *N, unsigned Opc);
SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
SDNode *SelectAtomicLoadArith(SDNode *Node, MVT NVT);
@@ -219,9 +219,9 @@ namespace {
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
std::vector<SDValue> &OutOps);
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
std::vector<SDValue> &OutOps) override;
void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

View File

@@ -526,28 +526,28 @@ namespace llvm {
public:
explicit X86TargetLowering(X86TargetMachine &TM);
virtual unsigned getJumpTableEncoding() const;
unsigned getJumpTableEncoding() const override;
virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i8; }
virtual const MCExpr *
const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid,
MCContext &Ctx) const;
MCContext &Ctx) const override;
/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
/// jumptable.
virtual SDValue getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const;
virtual const MCExpr *
SDValue getPICJumpTableRelocBase(SDValue Table,
SelectionDAG &DAG) const override;
const MCExpr *
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
unsigned JTI, MCContext &Ctx) const;
unsigned JTI, MCContext &Ctx) const override;
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contains are placed at 16-byte boundaries while the rest are at
/// 4-byte boundaries.
virtual unsigned getByValTypeAlignment(Type *Ty) const;
unsigned getByValTypeAlignment(Type *Ty) const override;
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
@@ -560,10 +560,9 @@ namespace llvm {
/// source is constant so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
virtual EVT
getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
MachineFunction &MF) const;
EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
MachineFunction &MF) const override;
/// isSafeMemOpType - Returns true if it's safe to use load / store of the
/// specified type to expand memcpy / memset inline. This is mostly true
@@ -571,89 +570,91 @@ namespace llvm {
/// targets without SSE2 f64 load / store are done with fldl / fstpl which
/// also does type conversion. Note the specified type doesn't have to be
/// legal as the hook is used before type legalization.
virtual bool isSafeMemOpType(MVT VT) const;
bool isSafeMemOpType(MVT VT) const override;
/// allowsUnalignedMemoryAccesses - Returns true if the target allows
/// unaligned memory accesses. of the specified type. Returns whether it
/// is "fast" by reference in the second argument.
virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
bool *Fast) const;
bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
bool *Fast) const override;
/// LowerOperation - Provide custom lowering hooks for some operations.
///
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const;
void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) const override;
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;
bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;
/// isTypeDesirable - Return true if the target has native support for the
/// specified value type and it is 'desirable' to use the type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
/// and some i16 instructions are slow.
virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;
bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;
virtual MachineBasicBlock *
MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
MachineBasicBlock *MBB) const;
MachineBasicBlock *MBB) const override;
/// getTargetNodeName - This method returns the name of a target specific
/// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
const char *getTargetNodeName(unsigned Opcode) const override;
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
virtual void computeMaskedBitsForTargetNode(const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
void computeMaskedBitsForTargetNode(const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
const SelectionDAG &DAG,
unsigned Depth = 0) const override;
// ComputeNumSignBitsForTargetNode - Determine the number of bits in the
// operation that are sign bits.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
unsigned Depth) const;
unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
unsigned Depth) const override;
virtual bool
isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
bool isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
int64_t &Offset) const override;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
virtual bool ExpandInlineAsm(CallInst *CI) const;
bool ExpandInlineAsm(CallInst *CI) const override;
ConstraintType getConstraintType(const std::string &Constraint) const;
ConstraintType
getConstraintType(const std::string &Constraint) const override;
/// Examine constraint string and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
virtual ConstraintWeight getSingleConstraintMatchWeight(
AsmOperandInfo &info, const char *constraint) const;
ConstraintWeight
getSingleConstraintMatchWeight(AsmOperandInfo &info,
const char *constraint) const override;
virtual const char *LowerXConstraint(EVT ConstraintVT) const;
const char *LowerXConstraint(EVT ConstraintVT) const override;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
/// true it means one of the asm constraint of the inline asm instruction
/// being processed is 'm'.
virtual void LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
void LowerAsmOperandForConstraint(SDValue Op,
std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const override;
/// getRegForInlineAsmConstraint - Given a physical register constraint
/// (e.g. {edx}), return the register number and the register class for the
@@ -661,34 +662,34 @@ namespace llvm {
/// error, this returns a register number of 0.
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
MVT VT) const;
MVT VT) const override;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
virtual bool isLegalICmpImmediate(int64_t Imm) const;
bool isLegalICmpImmediate(int64_t Imm) const override;
/// isLegalAddImmediate - Return true if the specified immediate is legal
/// add immediate, that is the target has add instructions which can
/// add a register and the immediate without having to materialize
/// the immediate into a register.
virtual bool isLegalAddImmediate(int64_t Imm) const;
bool isLegalAddImmediate(int64_t Imm) const override;
virtual bool isVectorShiftByScalarCheap(Type *Ty) const;
bool isVectorShiftByScalarCheap(Type *Ty) const override;
/// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate a i32 value in
/// register EAX to i16 by referencing its sub-register AX.
virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
bool isTruncateFree(EVT VT1, EVT VT2) const override;
virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
/// isZExtFree - Return true if any actual instruction that defines a
/// value of type Ty1 implicit zero-extends the value to Ty2 in the result
@@ -698,44 +699,44 @@ namespace llvm {
/// does not necessarily apply to truncate instructions. e.g. on x86-64,
/// all instructions that define 32-bit values implicit zero-extend the
/// result out to 64 bits.
virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
virtual bool isZExtFree(EVT VT1, EVT VT2) const;
virtual bool isZExtFree(SDValue Val, EVT VT2) const;
bool isZExtFree(Type *Ty1, Type *Ty2) const override;
bool isZExtFree(EVT VT1, EVT VT2) const override;
bool isZExtFree(SDValue Val, EVT VT2) const override;
/// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
/// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
/// expanded to FMAs when this method returns true, otherwise fmuladd is
/// expanded to fmul + fadd.
virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
/// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
/// from i32 to i8 but not from i32 to i16.
virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;
bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask
/// values are assumed to be legal.
virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
EVT VT) const;
bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
EVT VT) const override;
/// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is
/// used by Targets can use this to indicate if there is a suitable
/// VECTOR_SHUFFLE that can be used to replace a VAND with a constant
/// pool entry.
virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
EVT VT) const;
bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
EVT VT) const override;
/// ShouldShrinkFPConstant - If true, then instruction selection should
/// seek to shrink the FP constant of the specified type to a smaller type
/// in order to save space and / or reduce runtime.
virtual bool ShouldShrinkFPConstant(EVT VT) const {
bool ShouldShrinkFPConstant(EVT VT) const override {
// Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
// expensive than a straight movsd. On the other hand, it's important to
// shrink long double fp constant since fldt is very slow.
@@ -767,32 +768,32 @@ namespace llvm {
/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const;
bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
Type *Ty) const override;
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const;
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo) const override;
/// getStackCookieLocation - Return true if the target stores stack
/// protector cookies at a fixed offset in some non-standard address
/// space, and populates the address space and offset as
/// appropriate.
virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const;
bool getStackCookieLocation(unsigned &AddressSpace,
unsigned &Offset) const override;
SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
SelectionDAG &DAG) const;
virtual bool isNoopAddrSpaceCast(unsigned SrcAS,
unsigned DestAS) const override;
bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
/// \brief Reset the operation actions based on target options.
virtual void resetOperationActions();
void resetOperationActions() override;
protected:
std::pair<const TargetRegisterClass*, uint8_t>
findRepresentativeClass(MVT VT) const;
findRepresentativeClass(MVT VT) const override;
private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
@@ -904,37 +905,34 @@ namespace llvm {
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
virtual SDValue
SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) const;
virtual SDValue
LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const;
SmallVectorImpl<SDValue> &InVals) const override;
SDValue LowerCall(CallLoweringInfo &CLI,
SmallVectorImpl<SDValue> &InVals) const override;
virtual SDValue
LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const;
SDValue LowerReturn(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<SDValue> &OutVals,
SDLoc dl, SelectionDAG &DAG) const override;
virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
virtual MVT
getTypeForExtArgOrReturn(MVT VT, ISD::NodeType ExtendKind) const;
MVT getTypeForExtArgOrReturn(MVT VT,
ISD::NodeType ExtendKind) const override;
virtual bool
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const;
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const override;
virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const;
const uint16_t *getScratchRegisters(CallingConv::ID CC) const override;
/// Utility function to emit atomic-load-arith operations (and, or, xor,
/// nand, max, min, umax, umin). It takes the corresponding instruction to

View File

@@ -5268,7 +5268,7 @@ namespace {
static char ID;
CGBR() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF) {
bool runOnMachineFunction(MachineFunction &MF) override {
const X86TargetMachine *TM =
static_cast<const X86TargetMachine *>(&MF.getTarget());
@@ -5315,11 +5315,11 @@ namespace {
return true;
}
virtual const char *getPassName() const {
const char *getPassName() const override {
return "X86 PIC Global Base Reg Initialization";
}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
MachineFunctionPass::getAnalysisUsage(AU);
}
@@ -5335,7 +5335,7 @@ namespace {
static char ID;
LDTLSCleanup() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF) {
bool runOnMachineFunction(MachineFunction &MF) override {
X86MachineFunctionInfo* MFI = MF.getInfo<X86MachineFunctionInfo>();
if (MFI->getNumLocalDynamicTLSAccesses() < 2) {
// No point folding accesses if there isn't at least two.
@@ -5428,11 +5428,11 @@ namespace {
return Copy;
}
virtual const char *getPassName() const {
const char *getPassName() const override {
return "Local Dynamic TLS Access Clean-up";
}
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesCFG();
AU.addRequired<MachineDominatorTree>();
MachineFunctionPass::getAnalysisUsage(AU);

View File

@@ -169,30 +169,32 @@ public:
/// true, then it's expected the pre-extension value is available as a subreg
/// of the result register. This also returns the sub-register index in
/// SubIdx.
virtual bool isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const;
bool isCoalescableExtInstr(const MachineInstr &MI,
unsigned &SrcReg, unsigned &DstReg,
unsigned &SubIdx) const override;
unsigned isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const;
unsigned isLoadFromStackSlot(const MachineInstr *MI,
int &FrameIndex) const override;
/// isLoadFromStackSlotPostFE - Check for post-frame ptr elimination
/// stack locations as well. This uses a heuristic so it isn't
/// reliable for correctness.
unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const;
int &FrameIndex) const override;
unsigned isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const;
unsigned isStoreToStackSlot(const MachineInstr *MI,
int &FrameIndex) const override;
/// isStoreToStackSlotPostFE - Check for post-frame ptr elimination
/// stack locations as well. This uses a heuristic so it isn't
/// reliable for correctness.
unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
int &FrameIndex) const;
int &FrameIndex) const override;
bool isReallyTriviallyReMaterializable(const MachineInstr *MI,
AliasAnalysis *AA) const;
AliasAnalysis *AA) const override;
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
unsigned DestReg, unsigned SubIdx,
const MachineInstr *Orig,
const TargetRegisterInfo &TRI) const;
const TargetRegisterInfo &TRI) const override;
/// Given an operand within a MachineInstr, insert preceding code to put it
/// into the right format for a particular kind of LEA instruction. This may
@@ -217,43 +219,43 @@ public:
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const;
MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI,
LiveVariables *LV) const override;
/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
virtual MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const;
MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI) const override;
// Branch analysis.
virtual bool isUnpredicatedTerminator(const MachineInstr* MI) const;
virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const;
virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const;
virtual bool canInsertSelect(const MachineBasicBlock&,
const SmallVectorImpl<MachineOperand> &Cond,
unsigned, unsigned, int&, int&, int&) const;
virtual void insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DstReg,
const SmallVectorImpl<MachineOperand> &Cond,
unsigned TrueReg, unsigned FalseReg) const;
virtual void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const;
virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
bool isUnpredicatedTerminator(const MachineInstr* MI) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,
bool AllowModify) const override;
unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
MachineBasicBlock *FBB,
const SmallVectorImpl<MachineOperand> &Cond,
DebugLoc DL) const override;
bool canInsertSelect(const MachineBasicBlock&,
const SmallVectorImpl<MachineOperand> &Cond,
unsigned, unsigned, int&, int&, int&) const override;
void insertSelect(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DstReg,
const SmallVectorImpl<MachineOperand> &Cond,
unsigned TrueReg, unsigned FalseReg) const override;
void copyPhysReg(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI, DebugLoc DL,
unsigned DestReg, unsigned SrcReg,
bool KillSrc) const override;
void storeRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned SrcReg, bool isKill, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
virtual void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
SmallVectorImpl<MachineOperand> &Addr,
@@ -262,11 +264,11 @@ public:
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const;
void loadRegFromStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
unsigned DestReg, int FrameIndex,
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const override;
virtual void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
SmallVectorImpl<MachineOperand> &Addr,
@@ -275,7 +277,7 @@ public:
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;
bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
/// foldMemoryOperand - If this target supports it, fold a load or store of
/// the specified stack slot into the specified machine instruction for the
@@ -283,33 +285,33 @@ public:
/// folding and return true, otherwise it should return false. If it folds
/// the instruction, it is likely that the MachineInstruction the iterator
/// references has been changed.
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const;
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const override;
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const;
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const override;
/// canFoldMemoryOperand - Returns true if the specified load / store is
/// folding is possible.
virtual bool canFoldMemoryOperand(const MachineInstr*,
const SmallVectorImpl<unsigned> &) const;
bool canFoldMemoryOperand(const MachineInstr*,
const SmallVectorImpl<unsigned> &) const override;
/// unfoldMemoryOperand - Separate a single instruction which folded a load or
/// a store or a load and a store into two or more instruction. If this is
/// possible, returns true as well as the new instructions by reference.
virtual bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const;
bool unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
SmallVectorImpl<MachineInstr*> &NewMIs) const override;
virtual bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const;
bool unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
SmallVectorImpl<SDNode*> &NewNodes) const override;
/// getOpcodeAfterMemoryUnfold - Returns the opcode of the would be new
/// instruction after load / store are unfolded from an instruction of the
@@ -317,17 +319,17 @@ public:
/// possible. If LoadRegIndex is non-null, it is filled in with the operand
/// index of the operand which will hold the register holding the loaded
/// value.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
unsigned *LoadRegIndex = 0) const;
unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
unsigned *LoadRegIndex = 0) const override;
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler
/// to determine if two loads are loading from the same base address. It
/// should only return true if the base pointers are the same and the
/// only differences between the two addresses are the offset. It also returns
/// the offsets by reference.
virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
int64_t &Offset1, int64_t &Offset2) const;
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1,
int64_t &Offset2) const override;
/// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
@@ -337,21 +339,21 @@ public:
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const;
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
int64_t Offset1, int64_t Offset2,
unsigned NumLoads) const override;
virtual bool shouldScheduleAdjacent(MachineInstr* First,
MachineInstr *Second) const override;
bool shouldScheduleAdjacent(MachineInstr* First,
MachineInstr *Second) const override;
virtual void getNoopForMachoTarget(MCInst &NopInst) const;
void getNoopForMachoTarget(MCInst &NopInst) const override;
virtual
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;
bool
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
/// isSafeToMoveRegClassDefs - Return true if it's safe to move a machine
/// instruction that defines the specified register class.
bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const;
bool isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const override;
static bool isX86_64ExtendedReg(const MachineOperand &MO) {
if (!MO.isReg()) return false;
@@ -365,16 +367,17 @@ public:
unsigned getGlobalBaseReg(MachineFunction *MF) const;
std::pair<uint16_t, uint16_t>
getExecutionDomain(const MachineInstr *MI) const;
getExecutionDomain(const MachineInstr *MI) const override;
void setExecutionDomain(MachineInstr *MI, unsigned Domain) const;
void setExecutionDomain(MachineInstr *MI, unsigned Domain) const override;
unsigned getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const;
unsigned
getPartialRegUpdateClearance(const MachineInstr *MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const override;
unsigned getUndefRegClearance(const MachineInstr *MI, unsigned &OpNum,
const TargetRegisterInfo *TRI) const;
const TargetRegisterInfo *TRI) const override;
void breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
const TargetRegisterInfo *TRI) const;
const TargetRegisterInfo *TRI) const override;
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
MachineInstr* MI,
@@ -382,27 +385,28 @@ public:
const SmallVectorImpl<MachineOperand> &MOs,
unsigned Size, unsigned Alignment) const;
bool isHighLatencyDef(int opc) const;
bool isHighLatencyDef(int opc) const override;
bool hasHighOperandLatency(const InstrItineraryData *ItinData,
const MachineRegisterInfo *MRI,
const MachineInstr *DefMI, unsigned DefIdx,
const MachineInstr *UseMI, unsigned UseIdx) const;
const MachineInstr *UseMI,
unsigned UseIdx) const override;
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
virtual bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
unsigned &SrcReg2,
int &CmpMask, int &CmpValue) const;
bool analyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
unsigned &SrcReg2, int &CmpMask,
int &CmpValue) const override;
/// optimizeCompareInstr - Check if there exists an earlier instruction that
/// operates on the same source operands and sets flags in the same way as
/// Compare; remove Compare if possible.
virtual bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const;
bool optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
unsigned SrcReg2, int CmpMask, int CmpValue,
const MachineRegisterInfo *MRI) const override;
/// optimizeLoadInstr - Try to remove the load by folding it to a register
/// operand at the use. We fold the load instructions if and only if the
@@ -411,10 +415,10 @@ public:
/// defined by the load we are trying to fold. DefMI returns the machine
/// instruction that defines FoldAsLoadDefReg, and the function returns
/// the machine instruction generated due to folding.
virtual MachineInstr* optimizeLoadInstr(MachineInstr *MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const;
MachineInstr* optimizeLoadInstr(MachineInstr *MI,
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const override;
private:
MachineInstr * convertToThreeAddressWithLEA(unsigned MIOpc,

View File

@@ -35,41 +35,41 @@ namespace llvm {
/// overwriting OLD with a branch to NEW. This is used for self-modifying
/// code.
///
virtual void replaceMachineCodeForFunction(void *Old, void *New);
void replaceMachineCodeForFunction(void *Old, void *New) override;
/// emitGlobalValueIndirectSym - Use the specified JITCodeEmitter object
/// to emit an indirect symbol which contains the address of the specified
/// ptr.
virtual void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
JITCodeEmitter &JCE);
void *emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
JITCodeEmitter &JCE) override;
// getStubLayout - Returns the size and alignment of the largest call stub
// on X86.
virtual StubLayout getStubLayout();
StubLayout getStubLayout() override;
/// emitFunctionStub - Use the specified JITCodeEmitter object to emit a
/// small native function that simply calls the function at the specified
/// address.
virtual void *emitFunctionStub(const Function* F, void *Target,
JITCodeEmitter &JCE);
void *emitFunctionStub(const Function* F, void *Target,
JITCodeEmitter &JCE) override;
/// getPICJumpTableEntry - Returns the value of the jumptable entry for the
/// specific basic block.
virtual uintptr_t getPICJumpTableEntry(uintptr_t BB, uintptr_t JTBase);
uintptr_t getPICJumpTableEntry(uintptr_t BB, uintptr_t JTBase) override;
/// getLazyResolverFunction - Expose the lazy resolver to the JIT.
virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn);
LazyResolverFn getLazyResolverFunction(JITCompilerFn) override;
/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
virtual void relocate(void *Function, MachineRelocation *MR,
unsigned NumRelocs, unsigned char* GOTBase);
void relocate(void *Function, MachineRelocation *MR,
unsigned NumRelocs, unsigned char* GOTBase) override;
/// allocateThreadLocalMemory - Each target has its own way of
/// handling thread local variables. This method returns a value only
/// meaningful to the target.
virtual char* allocateThreadLocalMemory(size_t size);
char* allocateThreadLocalMemory(size_t size) override;
/// setPICBase / getPICBase - Getter / setter of PICBase, used to compute
/// PIC jumptable entry.

View File

@@ -51,9 +51,9 @@ namespace {
PadShortFunc() : MachineFunctionPass(ID)
, Threshold(4), TM(0), TII(0) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
bool runOnMachineFunction(MachineFunction &MF) override;
virtual const char *getPassName() const {
const char *getPassName() const override {
return "X86 Atom pad short functions";
}

View File

@@ -62,66 +62,70 @@ public:
/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int getCompactUnwindRegNum(unsigned RegNum, bool isEH) const;
int getCompactUnwindRegNum(unsigned RegNum, bool isEH) const override;
/// Code Generation virtual methods...
///
virtual bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const;
bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override;
/// getMatchingSuperRegClass - Return a subclass of the specified register
/// class A so that each register in it has a sub-register of the
/// specified sub-register index which is in the specified register class B.
virtual const TargetRegisterClass *
const TargetRegisterClass *
getMatchingSuperRegClass(const TargetRegisterClass *A,
const TargetRegisterClass *B, unsigned Idx) const;
const TargetRegisterClass *B,
unsigned Idx) const override;
virtual const TargetRegisterClass *
getSubClassWithSubReg(const TargetRegisterClass *RC, unsigned Idx) const;
const TargetRegisterClass *
getSubClassWithSubReg(const TargetRegisterClass *RC,
unsigned Idx) const override;
const TargetRegisterClass*
getLargestLegalSuperClass(const TargetRegisterClass *RC) const;
getLargestLegalSuperClass(const TargetRegisterClass *RC) const override;
/// getPointerRegClass - Returns a TargetRegisterClass used for pointer
/// values.
const TargetRegisterClass *
getPointerRegClass(const MachineFunction &MF, unsigned Kind = 0) const;
getPointerRegClass(const MachineFunction &MF,
unsigned Kind = 0) const override;
/// getCrossCopyRegClass - Returns a legal register class to copy a register
/// in the specified class to or from. Returns NULL if it is possible to copy
/// between a two registers of the specified class.
const TargetRegisterClass *
getCrossCopyRegClass(const TargetRegisterClass *RC) const;
getCrossCopyRegClass(const TargetRegisterClass *RC) const override;
unsigned getRegPressureLimit(const TargetRegisterClass *RC,
MachineFunction &MF) const;
MachineFunction &MF) const override;
/// getCalleeSavedRegs - Return a null-terminated list of all of the
/// callee-save registers on this target.
const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
const uint32_t *getCallPreservedMask(CallingConv::ID) const;
const uint16_t *
getCalleeSavedRegs(const MachineFunction* MF = 0) const override;
const uint32_t *getCallPreservedMask(CallingConv::ID) const override;
const uint32_t *getNoPreservedMask() const;
/// getReservedRegs - Returns a bitset indexed by physical register number
/// indicating if a register is a special register that has particular uses and
/// should be considered unavailable at all times, e.g. SP, RA. This is used by
/// register scavenger to determine what registers are free.
BitVector getReservedRegs(const MachineFunction &MF) const;
BitVector getReservedRegs(const MachineFunction &MF) const override;
bool hasBasePointer(const MachineFunction &MF) const;
bool canRealignStack(const MachineFunction &MF) const;
bool needsStackRealignment(const MachineFunction &MF) const;
bool needsStackRealignment(const MachineFunction &MF) const override;
bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg,
int &FrameIdx) const;
int &FrameIdx) const override;
void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = NULL) const;
RegScavenger *RS = NULL) const override;
// Debug information queries.
unsigned getFrameRegister(const MachineFunction &MF) const;
unsigned getFrameRegister(const MachineFunction &MF) const override;
unsigned getStackRegister() const { return StackPtr; }
unsigned getBaseRegister() const { return BasePtr; }
// FIXME: Move to FrameInfok

View File

@@ -33,22 +33,20 @@ public:
explicit X86SelectionDAGInfo(const X86TargetMachine &TM);
~X86SelectionDAGInfo();
virtual
SDValue EmitTargetCodeForMemset(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile,
MachinePointerInfo DstPtrInfo) const;
MachinePointerInfo DstPtrInfo) const override;
virtual
SDValue EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl,
SDValue Chain,
SDValue Dst, SDValue Src,
SDValue Size, unsigned Align,
bool isVolatile, bool AlwaysInline,
MachinePointerInfo DstPtrInfo,
MachinePointerInfo SrcPtrInfo) const;
MachinePointerInfo SrcPtrInfo) const override;
};
}

View File

@@ -240,7 +240,7 @@ public:
void AutoDetectSubtargetFeatures();
/// \brief Reset the features for the X86 target.
virtual void resetSubtargetFeatures(const MachineFunction *MF);
void resetSubtargetFeatures(const MachineFunction *MF) override;
private:
void initializeEnvironment();
void resetSubtargetFeatures(StringRef CPU, StringRef FS);
@@ -411,7 +411,7 @@ public:
/// enablePostRAScheduler - run for Atom optimization.
bool enablePostRAScheduler(CodeGenOpt::Level OptLevel,
TargetSubtargetInfo::AntiDepBreakMode& Mode,
RegClassVector& CriticalPathRCs) const;
RegClassVector& CriticalPathRCs) const override;
bool postRAScheduler() const { return PostRAScheduler; }

View File

@@ -158,11 +158,11 @@ public:
return *getX86TargetMachine().getSubtargetImpl();
}
virtual bool addInstSelector();
virtual bool addILPOpts();
virtual bool addPreRegAlloc();
virtual bool addPostRegAlloc();
virtual bool addPreEmitPass();
bool addInstSelector() override;
bool addILPOpts() override;
bool addPreRegAlloc() override;
bool addPostRegAlloc() override;
bool addPreEmitPass() override;
};
} // namespace

View File

@@ -46,38 +46,37 @@ public:
Reloc::Model RM, CodeModel::Model CM,
CodeGenOpt::Level OL);
virtual const DataLayout *getDataLayout() const { return &DL; }
virtual const X86InstrInfo *getInstrInfo() const {
const DataLayout *getDataLayout() const override { return &DL; }
const X86InstrInfo *getInstrInfo() const override {
return &InstrInfo;
}
virtual const TargetFrameLowering *getFrameLowering() const {
const TargetFrameLowering *getFrameLowering() const override {
return &FrameLowering;
}
virtual X86JITInfo *getJITInfo() {
X86JITInfo *getJITInfo() override {
return &JITInfo;
}
virtual const X86Subtarget *getSubtargetImpl() const{ return &Subtarget; }
virtual const X86TargetLowering *getTargetLowering() const {
const X86Subtarget *getSubtargetImpl() const override { return &Subtarget; }
const X86TargetLowering *getTargetLowering() const override {
return &TLInfo;
}
virtual const X86SelectionDAGInfo *getSelectionDAGInfo() const {
const X86SelectionDAGInfo *getSelectionDAGInfo() const override {
return &TSInfo;
}
virtual const X86RegisterInfo *getRegisterInfo() const {
const X86RegisterInfo *getRegisterInfo() const override {
return &getInstrInfo()->getRegisterInfo();
}
virtual const InstrItineraryData *getInstrItineraryData() const {
const InstrItineraryData *getInstrItineraryData() const override {
return &InstrItins;
}
/// \brief Register X86 analysis passes with a pass manager.
virtual void addAnalysisPasses(PassManagerBase &PM);
void addAnalysisPasses(PassManagerBase &PM) override;
// Set up the pass pipeline.
virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
virtual bool addCodeEmitter(PassManagerBase &PM,
JITCodeEmitter &JCE);
bool addCodeEmitter(PassManagerBase &PM, JITCodeEmitter &JCE) override;
};
} // End llvm namespace

View File

@@ -35,11 +35,11 @@ namespace {
static char ID;
VZeroUpperInserter() : MachineFunctionPass(ID) {}
virtual bool runOnMachineFunction(MachineFunction &MF);
bool runOnMachineFunction(MachineFunction &MF) override;
bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB);
virtual const char *getPassName() const { return "X86 vzeroupper inserter";}
const char *getPassName() const override {return "X86 vzeroupper inserter";}
private:
const TargetInstrInfo *TII; // Machine instruction info.

View File

@@ -615,7 +615,7 @@ EmitMatcherList(const Matcher *N, unsigned Indent, unsigned CurrentIdx,
void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
// Emit pattern predicates.
if (!PatternPredicates.empty()) {
OS << "virtual bool CheckPatternPredicate(unsigned PredNo) const {\n";
OS << "bool CheckPatternPredicate(unsigned PredNo) const override {\n";
OS << " switch (PredNo) {\n";
OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n";
for (unsigned i = 0, e = PatternPredicates.size(); i != e; ++i)
@@ -633,8 +633,8 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
PFsByName[I->first->getName()] = I->second;
if (!NodePredicates.empty()) {
OS << "virtual bool CheckNodePredicate(SDNode *Node,\n";
OS << " unsigned PredNo) const {\n";
OS << "bool CheckNodePredicate(SDNode *Node,\n";
OS << " unsigned PredNo) const override {\n";
OS << " switch (PredNo) {\n";
OS << " default: llvm_unreachable(\"Invalid predicate in table?\");\n";
for (unsigned i = 0, e = NodePredicates.size(); i != e; ++i) {
@@ -653,9 +653,9 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
// Emit CompletePattern matchers.
// FIXME: This should be const.
if (!ComplexPatterns.empty()) {
OS << "virtual bool CheckComplexPattern(SDNode *Root, SDNode *Parent,\n";
OS << " SDValue N, unsigned PatternNo,\n";
OS << " SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) {\n";
OS << "bool CheckComplexPattern(SDNode *Root, SDNode *Parent,\n";
OS << " SDValue N, unsigned PatternNo,\n";
OS << " SmallVectorImpl<std::pair<SDValue, SDNode*> > &Result) override {\n";
OS << " unsigned NextRes = Result.size();\n";
OS << " switch (PatternNo) {\n";
OS << " default: llvm_unreachable(\"Invalid pattern # in table?\");\n";
@@ -694,7 +694,7 @@ void MatcherTableEmitter::EmitPredicateFunctions(formatted_raw_ostream &OS) {
// Emit SDNodeXForm handlers.
// FIXME: This should be const.
if (!NodeXForms.empty()) {
OS << "virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) {\n";
OS << "SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) override {\n";
OS << " switch (XFormNo) {\n";
OS << " default: llvm_unreachable(\"Invalid xform # in table?\");\n";

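These hunks touch a code generator rather than a hand-written header: TableGen emits CheckPatternPredicate, CheckNodePredicate, CheckComplexPattern and RunSDNodeXForm as members of the generated instruction-selector class (pulled in through files such as X86GenDAGISel.inc above), so the emitted declarations get the same virtual-to-override rewrite. A hypothetical sketch of the shape of the generated code after this change; the case bodies are invented for illustration:

bool CheckPatternPredicate(unsigned PredNo) const override {
  switch (PredNo) {
  default: llvm_unreachable("Invalid predicate in table?");
  case 0: return (Subtarget->hasSSE1());  // hypothetical predicate #0
  case 1: return (Subtarget->hasSSE2());  // hypothetical predicate #1
  }
}
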
View File

@@ -965,23 +965,24 @@ RegisterInfoEmitter::runTargetHeader(raw_ostream &OS, CodeGenTarget &Target,
OS << "struct " << ClassName << " : public TargetRegisterInfo {\n"
<< " explicit " << ClassName
<< "(unsigned RA, unsigned D = 0, unsigned E = 0, unsigned PC = 0);\n"
<< " virtual bool needsStackRealignment(const MachineFunction &) const\n"
<< " bool needsStackRealignment(const MachineFunction &) const override\n"
<< " { return false; }\n";
if (!RegBank.getSubRegIndices().empty()) {
OS << " virtual unsigned composeSubRegIndicesImpl"
<< "(unsigned, unsigned) const;\n"
<< " virtual const TargetRegisterClass *"
"getSubClassWithSubReg(const TargetRegisterClass*, unsigned) const;\n";
OS << " unsigned composeSubRegIndicesImpl"
<< "(unsigned, unsigned) const override;\n"
<< " const TargetRegisterClass *getSubClassWithSubReg"
<< "(const TargetRegisterClass*, unsigned) const override;\n";
}
OS << " virtual const RegClassWeight &getRegClassWeight("
<< "const TargetRegisterClass *RC) const;\n"
<< " virtual unsigned getRegUnitWeight(unsigned RegUnit) const;\n"
<< " virtual unsigned getNumRegPressureSets() const;\n"
<< " virtual const char *getRegPressureSetName(unsigned Idx) const;\n"
<< " virtual unsigned getRegPressureSetLimit(unsigned Idx) const;\n"
<< " virtual const int *getRegClassPressureSets("
<< "const TargetRegisterClass *RC) const;\n"
<< " virtual const int *getRegUnitPressureSets(unsigned RegUnit) const;\n"
OS << " const RegClassWeight &getRegClassWeight("
<< "const TargetRegisterClass *RC) const override;\n"
<< " unsigned getRegUnitWeight(unsigned RegUnit) const override;\n"
<< " unsigned getNumRegPressureSets() const override;\n"
<< " const char *getRegPressureSetName(unsigned Idx) const override;\n"
<< " unsigned getRegPressureSetLimit(unsigned Idx) const override;\n"
<< " const int *getRegClassPressureSets("
<< "const TargetRegisterClass *RC) const override;\n"
<< " const int *getRegUnitPressureSets("
<< "unsigned RegUnit) const override;\n"
<< "};\n\n";
ArrayRef<CodeGenRegisterClass*> RegisterClasses = RegBank.getRegClasses();

View File

@@ -1498,7 +1498,7 @@ void SubtargetEmitter::run(raw_ostream &OS) {
<< "StringRef FS);\n"
<< "public:\n"
<< " unsigned resolveSchedClass(unsigned SchedClass, const MachineInstr *DefMI,"
<< " const TargetSchedModel *SchedModel) const;\n"
<< " const TargetSchedModel *SchedModel) const override;\n"
<< " DFAPacketizer *createDFAPacketizer(const InstrItineraryData *IID)"
<< " const;\n"
<< "};\n";