AtomicExpand: Change return type for shouldExpandAtomicStoreInIR
Use the same enum as the other atomic instructions for consistency, in preparation for the addition of another strategy. Introduce a new "Expand" option, since the store expansion does not use cmpxchg. Alternatively, the existing CmpXChg strategy could be renamed to Expand.
parent 1b547799c5
commit c4ea925f50
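To illustrate the shape of the change for backends: before this patch a target answered the question with a bool; now it picks a strategy from the shared enum. A minimal sketch, assuming a hypothetical out-of-tree target (MyTargetLowering and its 64-bit cutoff are illustrative, not part of this commit):

    #include "llvm/CodeGen/TargetLowering.h"
    #include "llvm/IR/Instructions.h"
    using namespace llvm;

    // Old hook: bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    // New hook returns an AtomicExpansionKind instead:
    TargetLowering::AtomicExpansionKind
    MyTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
      // Expand oversized atomic stores via atomicrmw xchg; leave the rest alone.
      return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
                 ? AtomicExpansionKind::Expand
                 : AtomicExpansionKind::None;
    }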
@@ -256,6 +256,7 @@ public:
     MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
     BitTestIntrinsic, // Use a target-specific intrinsic for special bit
                       // operations; used by X86.
+    Expand, // Generic expansion in terms of other atomic operations.
   };
 
   /// Enum that specifies when a multiplication should be expanded.

@@ -2020,12 +2021,6 @@ public:
   // be unnecessarily held, except if clrex, inserted by this hook, is executed.
   virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilderBase &Builder) const {}
 
-  /// Returns true if the given (atomic) store should be expanded by the
-  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
-  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
-    return false;
-  }
-
   /// Returns true if arguments should be sign-extended in lib calls.
   virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
     return IsSigned;

@@ -2042,6 +2037,13 @@ public:
     return AtomicExpansionKind::None;
   }
 
+  /// Returns how the given (atomic) store should be expanded by the IR-level
+  /// AtomicExpand pass. For instance AtomicExpansionKind::Expand will try
+  /// to use an atomicrmw xchg.
+  virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+    return AtomicExpansionKind::None;
+  }
+
   /// Returns how the given atomic cmpxchg should be expanded by the IR-level
   /// AtomicExpand pass.
   virtual AtomicExpansionKind

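For context, after this patch the strategy enum in TargetLowering.h reads roughly as below (an abridged reconstruction: only the enumerators visible in the hunk above plus the long-standing ones are listed, and the comments are paraphrased, so treat it as approximate):

    enum class AtomicExpansionKind {
      None,             // Don't expand the instruction.
      LLSC,             // Expand into a load-linked/store-conditional loop.
      LLOnly,           // Expand an atomic load into just a load-linked.
      CmpXChg,          // Expand the instruction into a cmpxchg loop.
      MaskedIntrinsic,  // Use a target-specific intrinsic for the LL/SC loop.
      BitTestIntrinsic, // Use a target-specific intrinsic for special bit
                        // operations; used by X86.
      Expand,           // Generic expansion in terms of other atomic operations.
    };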
@@ -77,6 +77,7 @@ private:
   bool expandAtomicLoadToLL(LoadInst *LI);
   bool expandAtomicLoadToCmpXchg(LoadInst *LI);
   StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
+  bool tryExpandAtomicStore(StoreInst *SI);
   void expandAtomicStore(StoreInst *SI);
   bool tryExpandAtomicRMW(AtomicRMWInst *AI);
   AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI);

@@ -271,10 +272,8 @@ bool AtomicExpand::runOnFunction(Function &F) {
         MadeChange = true;
       }
 
-      if (TLI->shouldExpandAtomicStoreInIR(SI)) {
-        expandAtomicStore(SI);
+      if (tryExpandAtomicStore(SI))
         MadeChange = true;
-      }
     } else if (RMWI) {
       // There are two different ways of expanding RMW instructions:
       // - into a load if it is idempotent

@@ -418,6 +417,18 @@ bool AtomicExpand::tryExpandAtomicLoad(LoadInst *LI) {
   }
 }
 
+bool AtomicExpand::tryExpandAtomicStore(StoreInst *SI) {
+  switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
+  case TargetLoweringBase::AtomicExpansionKind::None:
+    return false;
+  case TargetLoweringBase::AtomicExpansionKind::Expand:
+    expandAtomicStore(SI);
+    return true;
+  default:
+    llvm_unreachable("Unhandled case in tryExpandAtomicStore");
+  }
+}
+
 bool AtomicExpand::expandAtomicLoadToLL(LoadInst *LI) {
   IRBuilder<> Builder(LI);
 
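The new tryExpandAtomicStore dispatches on the enum but delegates the rewrite to the pre-existing expandAtomicStore helper, which this diff does not show. A paraphrase of that helper (details approximate, not part of this commit): the atomic store is replaced by an atomicrmw xchg whose result is ignored, and the xchg is then run through the usual RMW expansion.

    // Approximate paraphrase of the existing AtomicExpand::expandAtomicStore.
    void AtomicExpand::expandAtomicStore(StoreInst *SI) {
      IRBuilder<> Builder(SI);
      // Replace the store with an exchange whose loaded result is dropped.
      AtomicRMWInst *AI = Builder.CreateAtomicRMW(
          AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
          SI->getAlign(), SI->getOrdering());
      SI->eraseFromParent();
      // The swap may itself need lowering (LL/SC loop, cmpxchg loop, ...).
      tryExpandAtomicRMW(AI);
    }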
@@ -19369,12 +19369,12 @@ bool AArch64TargetLowering::shouldInsertFencesForAtomic(
 // Loads and stores less than 128-bits are already atomic; ones above that
 // are doomed anyway, so defer to the default libcall and blame the OS when
 // things go wrong.
-bool AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+TargetLoweringBase::AtomicExpansionKind
+AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
-  if (Size != 128)
-    return false;
-
-  return !isOpSuitableForLDPSTP(SI);
+  if (Size != 128 || isOpSuitableForLDPSTP(SI))
+    return AtomicExpansionKind::None;
+  return AtomicExpansionKind::Expand;
 }
 
 // Loads and stores less than 128-bits are already atomic; ones above that

@@ -673,7 +673,8 @@ public:
 
   TargetLoweringBase::AtomicExpansionKind
   shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
-  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+  TargetLoweringBase::AtomicExpansionKind
+  shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
   TargetLoweringBase::AtomicExpansionKind
   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
 
@@ -20949,7 +20949,8 @@ Instruction *ARMTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
 // are doomed anyway, so defer to the default libcall and blame the OS when
 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
 // anything for those.
-bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+TargetLoweringBase::AtomicExpansionKind
+ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
   bool has64BitAtomicStore;
   if (Subtarget->isMClass())
     has64BitAtomicStore = false;

@@ -20959,7 +20960,8 @@ bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
     has64BitAtomicStore = Subtarget->hasV6Ops();
 
   unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
-  return Size == 64 && has64BitAtomicStore;
+  return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand
+                                           : AtomicExpansionKind::None;
 }
 
 // Loads and stores less than 64-bits are already atomic; ones above that

@@ -665,7 +665,8 @@ class VectorType;
     bool shouldInsertFencesForAtomic(const Instruction *I) const override;
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
-    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+    TargetLoweringBase::AtomicExpansionKind
+    shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
     TargetLoweringBase::AtomicExpansionKind

@@ -3666,9 +3666,12 @@ HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
              : AtomicExpansionKind::None;
 }
 
-bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+TargetLowering::AtomicExpansionKind
+HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
   // Do not expand loads and stores that don't exceed 64 bits.
-  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
+  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
+             ? AtomicExpansionKind::Expand
+             : AtomicExpansionKind::None;
 }
 
 TargetLowering::AtomicExpansionKind

@@ -328,7 +328,7 @@ public:
   Value *emitStoreConditional(IRBuilderBase &Builder, Value *Val, Value *Addr,
                               AtomicOrdering Ord) const override;
   AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
-  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+  AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
   AtomicExpansionKind
   shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;
 
@@ -30421,7 +30421,8 @@ bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
   return false;
 }
 
-bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
+TargetLoweringBase::AtomicExpansionKind
+X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
   Type *MemType = SI->getValueOperand()->getType();
 
   bool NoImplicitFloatOps =

@@ -30429,9 +30430,10 @@ bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
   if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
       !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
       (Subtarget.hasSSE1() || Subtarget.hasX87()))
-    return false;
+    return AtomicExpansionKind::None;
 
-  return needsCmpXchgNb(MemType);
+  return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
+                                 : AtomicExpansionKind::None;
 }
 
 // Note: this turns large loads into lock cmpxchg8b/16b.
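Note on the X86 hunk: the new kind is spelled Expand rather than CmpXChg, but the end result on 32-bit x86 is presumably unchanged; the store becomes an atomicrmw xchg, and wide exchanges are still ultimately lowered through lock cmpxchg8b/16b (compare the "this turns large loads into lock cmpxchg8b/16b" comment above), which is exactly the naming ambiguity the commit message acknowledges.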
@@ -1621,7 +1621,8 @@ namespace llvm {
 
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
-    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
+    TargetLoweringBase::AtomicExpansionKind
+    shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
     TargetLoweringBase::AtomicExpansionKind
     shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
     TargetLoweringBase::AtomicExpansionKind