80-col fixups.

llvm-svn: 237403
Eric Christopher 2015-05-14 23:07:13 +00:00
parent 275806f90b
commit b3faa900bd
1 changed file with 21 additions and 15 deletions

@@ -126,8 +126,8 @@ public:
   /// Enum that specifies what a AtomicRMWInst is expanded to, if at all. Exists
   /// because different targets have different levels of support for these
-  /// atomic RMW instructions, and also have different options w.r.t. what they should
-  /// expand to.
+  /// atomic RMW instructions, and also have different options w.r.t. what they
+  /// should expand to.
   enum class AtomicRMWExpansionKind {
     None,      // Don't expand the instruction.
     LLSC,      // Expand the instruction into loadlinked/storeconditional; used
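For context on the hunk above: AtomicRMWExpansionKind is the value a target hands back when asked how an atomicrmw should be expanded. A minimal sketch of a backend using it follows, assuming the shouldExpandAtomicRMWInIR hook from the same header; the MyTargetLowering class and its policy are hypothetical, not part of this change.

    // Sketch only: a hypothetical target picking LL/SC expansion for most
    // atomicrmw operations and leaving xchg to normal lowering.
    #include "llvm/IR/Instructions.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    class MyTargetLowering : public TargetLowering {  // hypothetical backend
    public:
      AtomicRMWExpansionKind
      shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
        if (AI->getOperation() == AtomicRMWInst::Xchg)
          return AtomicRMWExpansionKind::None;  // lower xchg directly
        return AtomicRMWExpansionKind::LLSC;    // LL/SC loop for the rest
      }
    };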
@@ -258,11 +258,12 @@ public:
   /// isLoadBitCastBeneficial() - Return true if the following transform
   /// is beneficial.
   /// fold (conv (load x)) -> (load (conv*)x)
-  /// On architectures that don't natively support some vector loads efficiently,
-  /// casting the load to a smaller vector of larger types and loading
-  /// is more efficient, however, this can be undone by optimizations in
+  /// On architectures that don't natively support some vector loads
+  /// efficiently, casting the load to a smaller vector of larger types and
+  /// loading is more efficient, however, this can be undone by optimizations in
   /// dag combiner.
-  virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
+  virtual bool isLoadBitCastBeneficial(EVT /* Load */,
+                                       EVT /* Bitcast */) const {
     return true;
   }
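The function re-wrapped just above is the target's veto on that bitcast-of-load fold. A hedged sketch of an override follows; the class name and the specific type test are illustrative only, not drawn from this commit.

    // Sketch only: keep the fold by default, but refuse it when it would
    // turn a vector load into a scalar load this imaginary target handles poorly.
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    class MyTargetLowering : public TargetLowering {  // hypothetical backend
    public:
      bool isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT) const override {
        if (LoadVT.isVector() && !BitcastVT.isVector())
          return false;               // e.g. a v4i32 load bitcast to i128
        return true;                  // otherwise accept the default fold
      }
    };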
@@ -270,7 +271,7 @@ public:
   virtual bool isCheapToSpeculateCttz() const {
     return false;
   }
   /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
   virtual bool isCheapToSpeculateCtlz() const {
     return false;
@@ -573,7 +574,8 @@ public:
   /// Return how this load with extension should be treated: either it is legal,
   /// needs to be promoted to a larger size, needs to be expanded to some other
   /// code sequence, or the target has a custom expander for it.
-  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
+                                  EVT MemVT) const {
     if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
     unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
     unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
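For context on getLoadExtAction: the action table it reads is filled in by the target's constructor via setLoadExtAction. A small sketch under that assumption follows; the target class, constructor shape, and chosen types are made up for illustration.

    // Sketch only: an imaginary target with no native i8 -> i32
    // sign-extending load marks that combination Expand, and legalization
    // later reads it back through getLoadExtAction.
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    class MyTargetLowering : public TargetLowering {  // hypothetical backend
    public:
      explicit MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {
        setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8, Expand);
      }
    };

    // Elsewhere, a query such as
    //   TLI.getLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i8)
    // would now return TargetLowering::Expand.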
@@ -1053,8 +1055,9 @@ public:
   /// seq_cst. But if they are lowered to monotonic accesses, no amount of
   /// IR-level fences can prevent it.
   /// @{
-  virtual Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                        bool IsStore, bool IsLoad) const {
+  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
+                                        AtomicOrdering Ord, bool IsStore,
+                                        bool IsLoad) const {
     if (!getInsertFencesForAtomic())
       return nullptr;
@@ -1064,8 +1067,9 @@ public:
       return nullptr;
   }
-  virtual Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                         bool IsStore, bool IsLoad) const {
+  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+                                         AtomicOrdering Ord, bool IsStore,
+                                         bool IsLoad) const {
     if (!getInsertFencesForAtomic())
       return nullptr;
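The two hunks above re-wrap the fence hooks that are consulted when getInsertFencesForAtomic() is set. A sketch of a target overriding them with plain IR fences is below; the ordering checks and the class are illustrative, not copied from any in-tree backend.

    // Sketch only: emit a release fence before release-or-stronger atomic
    // stores and an acquire fence after acquire-or-stronger atomic loads.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    class MyTargetLowering : public TargetLowering {  // hypothetical backend
    public:
      Instruction *emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                    bool IsStore, bool IsLoad) const override {
        if (IsStore && (Ord == Release || Ord == AcquireRelease ||
                        Ord == SequentiallyConsistent))
          return Builder.CreateFence(Release);   // fence *before* the store
        return nullptr;
      }
      Instruction *emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                     bool IsStore, bool IsLoad) const override {
        if (IsLoad && (Ord == Acquire || Ord == AcquireRelease ||
                       Ord == SequentiallyConsistent))
          return Builder.CreateFence(Acquire);   // fence *after* the load
        return nullptr;
      }
    };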
@@ -1110,7 +1114,8 @@ public:
   /// it succeeds, and nullptr otherwise.
   /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
   /// another round of expansion.
-  virtual LoadInst *lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
+  virtual LoadInst *
+  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
     return nullptr;
   }
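As a gloss on the hunk above: an idempotent RMW is one like atomicrmw or %p, 0, whose stored value equals the loaded one, so it can be replaced by a fence plus an ordinary atomic load. A loose sketch under that reading follows; the fence choice and the monotonic load are illustrative, not a drop-in for any real target.

    // Sketch only: replace an idempotent atomicrmw with fence + monotonic load.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    class MyTargetLowering : public TargetLowering {  // hypothetical backend
    public:
      LoadInst *
      lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const override {
        IRBuilder<> Builder(RMWI);
        // A conservative full fence stands in for the RMW's ordering effects.
        Builder.CreateFence(SequentiallyConsistent);
        unsigned Align = RMWI->getType()->getPrimitiveSizeInBits() / 8;
        LoadInst *Load =
            Builder.CreateAlignedLoad(RMWI->getPointerOperand(), Align);
        Load->setAtomic(Monotonic);
        return Load;
      }
    };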
@@ -1962,7 +1967,8 @@ protected:
   /// Replace/modify any TargetFrameIndex operands with a targte-dependent
   /// sequence of memory operands that is recognized by PrologEpilogInserter.
-  MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
+  MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+                                    MachineBasicBlock *MBB) const;
 };
 /// This class defines information used to lower LLVM code to legal SelectionDAG
@@ -2687,7 +2693,7 @@ public:
   /// Hooks for building estimates in place of slower divisions and square
   /// roots.
   /// Return a reciprocal square root estimate value for the input operand.
   /// The RefinementSteps output is the number of Newton-Raphson refinement
   /// iterations required to generate a sufficient (though not necessarily