forked from OSchip/llvm-project
parent 275806f90b
commit b3faa900bd

llvm/include/llvm/Target
@@ -126,8 +126,8 @@ public:
   /// Enum that specifies what a AtomicRMWInst is expanded to, if at all. Exists
   /// because different targets have different levels of support for these
-  /// atomic RMW instructions, and also have different options w.r.t. what they should
-  /// expand to.
+  /// atomic RMW instructions, and also have different options w.r.t. what they
+  /// should expand to.
   enum class AtomicRMWExpansionKind {
     None, // Don't expand the instruction.
     LLSC, // Expand the instruction into loadlinked/storeconditional; used
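As context for this hunk: the enum is the value a target's atomic-RMW expansion hook hands back to the generic atomic expansion pass. A minimal sketch of such an override follows; the hook name shouldExpandAtomicRMWInIR, the MyTargetLowering class, and the 64-bit cutoff are assumptions for illustration and are not part of this commit.

// Minimal sketch, not part of this diff: hook name, class name, and the
// size cutoff are illustrative assumptions.
TargetLoweringBase::AtomicRMWExpansionKind
MyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // Expand through load-linked/store-conditional only for operand widths the
  // core handles natively; leave everything else unexpanded.
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  return Size <= 64 ? AtomicRMWExpansionKind::LLSC
                    : AtomicRMWExpansionKind::None;
}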
@@ -258,11 +258,12 @@ public:
   /// isLoadBitCastBeneficial() - Return true if the following transform
   /// is beneficial.
   /// fold (conv (load x)) -> (load (conv*)x)
-  /// On architectures that don't natively support some vector loads efficiently,
-  /// casting the load to a smaller vector of larger types and loading
-  /// is more efficient, however, this can be undone by optimizations in
+  /// On architectures that don't natively support some vector loads
+  /// efficiently, casting the load to a smaller vector of larger types and
+  /// loading is more efficient, however, this can be undone by optimizations in
   /// dag combiner.
-  virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
+  virtual bool isLoadBitCastBeneficial(EVT /* Load */,
+                                       EVT /* Bitcast */) const {
     return true;
   }
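A target that cannot cheaply load the casted type can veto this fold by overriding the hook; a minimal sketch follows. The class name and the particular legality test are assumptions, not part of this commit.

// Minimal sketch, not part of this diff.
bool MyTargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT) const {
  (void)LoadVT;
  // Keep the original load when the casted type would itself need to be
  // legalized; only simple, legal destination types profit from the fold here.
  return BitcastVT.isSimple() && isTypeLegal(BitcastVT);
}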
@@ -270,7 +271,7 @@ public:
   virtual bool isCheapToSpeculateCttz() const {
     return false;
   }

   /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
   virtual bool isCheapToSpeculateCtlz() const {
     return false;
@@ -573,7 +574,8 @@ public:
   /// Return how this load with extension should be treated: either it is legal,
   /// needs to be promoted to a larger size, needs to be expanded to some other
   /// code sequence, or the target has a custom expander for it.
-  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const {
+  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
+                                  EVT MemVT) const {
     if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
     unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
     unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
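The table this getter reads is normally populated from a target's TargetLowering constructor. A minimal usage sketch follows, assuming the matching three-type setLoadExtAction setter in this snapshot of the tree; the specific types and actions are illustrative only.

// Minimal sketch, not part of this diff: calls as they might appear in a
// hypothetical target's TargetLowering constructor.
setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);  // widen i1 sources first
setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i16, Legal);   // native zero-extending load
// Legalization then queries the same table through the getter above, e.g.
// getLoadExtAction(ISD::ZEXTLOAD, MVT::i32, MVT::i16) reports Legal.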
@@ -1053,8 +1055,9 @@ public:
   /// seq_cst. But if they are lowered to monotonic accesses, no amount of
   /// IR-level fences can prevent it.
   /// @{
-  virtual Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                        bool IsStore, bool IsLoad) const {
+  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
+                                        AtomicOrdering Ord, bool IsStore,
+                                        bool IsLoad) const {
     if (!getInsertFencesForAtomic())
       return nullptr;
@@ -1064,8 +1067,9 @@ public:
     return nullptr;
   }

-  virtual Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
-                                         bool IsStore, bool IsLoad) const {
+  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
+                                         AtomicOrdering Ord, bool IsStore,
+                                         bool IsLoad) const {
     if (!getInsertFencesForAtomic())
       return nullptr;
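These two hooks let a weakly ordered target bracket monotonic accesses with IR-level fences when getInsertFencesForAtomic() is set. A minimal sketch of one possible leading-fence policy follows; the class name and the exact ordering checks are assumptions rather than this commit's content, and the plain-enum AtomicOrdering spellings match this era of the header.

// Minimal sketch, not part of this diff.
Instruction *MyTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                AtomicOrdering Ord,
                                                bool IsStore,
                                                bool IsLoad) const {
  (void)IsLoad; // acquire-side fences would be placed by emitTrailingFence
  if (!getInsertFencesForAtomic())
    return nullptr;
  // Release (or stronger) stores get a fence in front of the access.
  if (IsStore && (Ord == Release || Ord == AcquireRelease ||
                  Ord == SequentiallyConsistent))
    return Builder.CreateFence(Release);
  return nullptr;
}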
@@ -1110,7 +1114,8 @@ public:
   /// it succeeds, and nullptr otherwise.
   /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
   /// another round of expansion.
-  virtual LoadInst *lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
+  virtual LoadInst *
+  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
     return nullptr;
   }
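An idempotent RMW is one whose operation leaves the stored value unchanged (for example an atomicrmw or of 0), so a target may serve it with a suitably fenced ordinary load. The sketch below only illustrates the general shape; the class name and the fence/ordering recipe are assumptions, and real targets use their own sequences.

// Minimal sketch, not part of this diff.
LoadInst *
MyTargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
  IRBuilder<> Builder(AI);                      // insert right before the RMW
  Builder.CreateFence(SequentiallyConsistent);  // leading fence
  LoadInst *Load = Builder.CreateLoad(AI->getPointerOperand());
  Load->setAtomic(Monotonic);
  Load->setAlignment(AI->getType()->getPrimitiveSizeInBits() / 8);
  Builder.CreateFence(SequentiallyConsistent);  // trailing fence
  return Load;
}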
@@ -1962,7 +1967,8 @@ protected:

   /// Replace/modify any TargetFrameIndex operands with a targte-dependent
   /// sequence of memory operands that is recognized by PrologEpilogInserter.
-  MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
+  MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+                                    MachineBasicBlock *MBB) const;
 };

 /// This class defines information used to lower LLVM code to legal SelectionDAG
@@ -2687,7 +2693,7 @@ public:

   /// Hooks for building estimates in place of slower divisions and square
   /// roots.

   /// Return a reciprocal square root estimate value for the input operand.
   /// The RefinementSteps output is the number of Newton-Raphson refinement
   /// iterations required to generate a sufficient (though not necessarily
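For reference, one Newton-Raphson refinement step for a reciprocal square root estimate has the scalar form sketched below; this is plain C++ purely to illustrate the arithmetic the refinement iterations perform, not the DAG-level hook itself.

// Given A and an estimate E of 1/sqrt(A), one step computes
// E' = E * (1.5 - 0.5 * A * E * E), roughly doubling the correct bits.
float refineRsqrtOnce(float A, float E) {
  return E * (1.5f - 0.5f * A * E * E);
}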