[Alignment][NFC] Use Align for code creating MemOp

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D73874
Guillaume Chatelet 2020-02-03 13:09:53 +01:00
parent b37d59353f
commit fc19465965
6 changed files with 124 additions and 104 deletions
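
For context, a minimal sketch (not taken from this patch) of what the Align and MaybeAlign types from llvm/Support/Alignment.h (introduced in D64790) express compared to the raw unsigned values they replace; the function name alignmentSketch is made up for illustration:

// Illustrative only: semantics of llvm::Align vs. the old "unsigned, 0 means
// unknown" convention.
#include "llvm/Support/Alignment.h"
#include <cassert>

void alignmentSketch() {
  // Align always holds a valid power of two; it cannot represent "unknown".
  llvm::Align A(16);
  assert(A.value() == 16);

  // MaybeAlign makes the old "0 means unspecified" state explicit.
  llvm::MaybeAlign M;                // unset
  llvm::Align Resolved = M ? *M : A; // the caller decides the fallback

  // commonAlignment replaces MinAlign for Align values: the largest alignment
  // both inputs guarantee.
  llvm::Align C = llvm::commonAlignment(A, llvm::Align(4)); // == Align(4)
  (void)Resolved;
  (void)C;
}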

View File

@@ -19,6 +19,7 @@
 #include "llvm/CodeGen/LowLevelType.h"
 #include "llvm/CodeGen/Register.h"
+#include "llvm/Support/Alignment.h"
 namespace llvm {
@@ -189,13 +190,13 @@ public:
 private:
   // Memcpy family optimization helpers.
   bool optimizeMemcpy(MachineInstr &MI, Register Dst, Register Src,
-                      unsigned KnownLen, unsigned DstAlign, unsigned SrcAlign,
+                      unsigned KnownLen, Align DstAlign, Align SrcAlign,
                       bool IsVolatile);
   bool optimizeMemmove(MachineInstr &MI, Register Dst, Register Src,
-                       unsigned KnownLen, unsigned DstAlign, unsigned SrcAlign,
+                       unsigned KnownLen, Align DstAlign, Align SrcAlign,
                        bool IsVolatile);
   bool optimizeMemset(MachineInstr &MI, Register Dst, Register Val,
-                      unsigned KnownLen, unsigned DstAlign, bool IsVolatile);
+                      unsigned KnownLen, Align DstAlign, bool IsVolatile);
   /// Given a non-indexed load or store instruction \p MI, find an offset that
   /// can be usefully and legally folded into it as a post-indexing operation.

View File

@@ -461,13 +461,22 @@ public:
   }
   /// Return the alignment of the specified stack object.
+  /// FIXME: Remove this function once transition to Align is over.
   unsigned getObjectAlignment(int ObjectIdx) const {
     assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
            "Invalid Object Idx!");
     return Objects[ObjectIdx + NumFixedObjects].Alignment.value();
   }
+  /// Return the alignment of the specified stack object.
+  Align getObjectAlign(int ObjectIdx) const {
+    assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    return Objects[ObjectIdx + NumFixedObjects].Alignment;
+  }
   /// setObjectAlignment - Change the alignment of the specified stack object.
+  /// FIXME: Remove this function once transition to Align is over.
   void setObjectAlignment(int ObjectIdx, unsigned Align) {
     assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
            "Invalid Object Idx!");
@@ -478,6 +487,17 @@ public:
     ensureMaxAlignment(Align);
   }
+  /// setObjectAlignment - Change the alignment of the specified stack object.
+  void setObjectAlignment(int ObjectIdx, Align Alignment) {
+    assert(unsigned(ObjectIdx + NumFixedObjects) < Objects.size() &&
+           "Invalid Object Idx!");
+    Objects[ObjectIdx + NumFixedObjects].Alignment = Alignment;
+    // Only ensure max alignment for the default stack.
+    if (getStackID(ObjectIdx) == 0)
+      ensureMaxAlignment(Alignment);
+  }
   /// Return the underlying Alloca of the specified
   /// stack object if it exists. Returns 0 if none exists.
   const AllocaInst* getObjectAllocation(int ObjectIdx) const {
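
The call sites converted below all follow one pattern against this new accessor pair; a minimal sketch (the wrapper function maybeRaiseObjectAlign is hypothetical, not from the patch):

// Sketch: raise a frame object's alignment using the Align-based API above.
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Support/Alignment.h"

void maybeRaiseObjectAlign(llvm::MachineFrameInfo &MFI, int FI,
                           llvm::Align NewAlign) {
  if (MFI.getObjectAlign(FI) < NewAlign)  // Align compares by value
    MFI.setObjectAlignment(FI, NewAlign); // Align overload added above
}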

View File

@@ -110,7 +110,7 @@ namespace Sched {
 struct MemOp {
   // Shared
   uint64_t Size;
-  unsigned DstAlign; // Specified alignment of the memory operation or zero if
+  uint64_t DstAlign; // Specified alignment of the memory operation or zero if
                      // destination alignment can satisfy any constraint.
   bool AllowOverlap;
   // memset only
@@ -119,30 +119,27 @@ struct MemOp {
   // memcpy only
   bool MemcpyStrSrc; // Indicates whether the memcpy source is an in-register
                      // constant so it does not need to be loaded.
-  unsigned SrcAlign; // Inferred alignment of the source or zero if the memory
+  uint64_t SrcAlign; // Inferred alignment of the source or zero if the memory
                      // operation does not need to load the value.
-  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, unsigned DstAlign,
-                    unsigned SrcAlign, bool IsVolatile,
+  static MemOp Copy(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
+                    Align SrcAlign, bool IsVolatile,
                     bool MemcpyStrSrc = false) {
-    assert(DstAlign && "Destination alignment should be set");
-    assert(SrcAlign && "Source alignment should be set");
     return {
         /*.Size =*/Size,
-        /*.DstAlign =*/DstAlignCanChange ? 0 : DstAlign,
+        /*.DstAlign =*/DstAlignCanChange ? 0 : DstAlign.value(),
         /*.AllowOverlap =*/!IsVolatile,
         /*.IsMemset =*/false,
        /*.ZeroMemset =*/false,
        /*.MemcpyStrSrc =*/MemcpyStrSrc,
-        /*.SrcAlign =*/SrcAlign,
+        /*.SrcAlign =*/SrcAlign.value(),
     };
   }
-  static MemOp Set(uint64_t Size, bool DstAlignCanChange, unsigned DstAlign,
+  static MemOp Set(uint64_t Size, bool DstAlignCanChange, Align DstAlign,
                    bool IsZeroMemset, bool IsVolatile) {
-    assert(DstAlign && "Destination alignment should be set");
     return {
         /*.Size =*/Size,
-        /*.DstAlign =*/DstAlignCanChange ? 0 : DstAlign,
+        /*.DstAlign =*/DstAlignCanChange ? 0 : DstAlign.value(),
         /*.AllowOverlap =*/!IsVolatile,
         /*.IsMemset =*/true,
         /*.ZeroMemset =*/IsZeroMemset,
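
With the factories now taking Align, call sites build MemOp descriptors as in the following sketch; the literal sizes and alignments are arbitrary examples, not values from the patch, and the include path is the location of the MemOp struct at the time of this change:

// Sketch: constructing MemOp descriptors via the Align-based factories.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/Support/Alignment.h"

llvm::MemOp makeExampleOps() {
  llvm::MemOp CopyOp =
      llvm::MemOp::Copy(/*Size=*/32, /*DstAlignCanChange=*/false,
                        /*DstAlign=*/llvm::Align(8),
                        /*SrcAlign=*/llvm::Align(4), /*IsVolatile=*/false);
  llvm::MemOp SetOp =
      llvm::MemOp::Set(/*Size=*/32, /*DstAlignCanChange=*/true,
                       /*DstAlign=*/llvm::Align(16),
                       /*IsZeroMemset=*/true, /*IsVolatile=*/false);
  (void)SetOp;
  return CopyOp;
}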

View File

@@ -954,9 +954,9 @@ static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
   return Val;
 }
-bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val,
-                                    unsigned KnownLen, unsigned Align,
-                                    bool IsVolatile) {
+bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst,
+                                    Register Val, unsigned KnownLen,
+                                    Align Alignment, bool IsVolatile) {
   auto &MF = *MI.getParent()->getParent();
   const auto &TLI = *MF.getSubtarget().getTargetLowering();
   auto &DL = MF.getDataLayout();
@@ -983,7 +983,7 @@ bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val
   if (!findGISelOptimalMemOpLowering(MemOps, Limit,
                                      MemOp::Set(KnownLen, DstAlignCanChange,
-                                                Align,
+                                                Alignment,
                                                 /*IsZeroMemset=*/IsZeroVal,
                                                 /*IsVolatile=*/IsVolatile),
                                      DstPtrInfo.getAddrSpace(), ~0u,
@@ -993,13 +993,13 @@ bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val
   if (DstAlignCanChange) {
     // Get an estimate of the type from the LLT.
     Type *IRTy = getTypeForLLT(MemOps[0], C);
-    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
-    if (NewAlign > Align) {
-      Align = NewAlign;
+    Align NewAlign = DL.getABITypeAlign(IRTy);
+    if (NewAlign > Alignment) {
+      Alignment = NewAlign;
       unsigned FI = FIDef->getOperand(1).getIndex();
       // Give the stack frame object a larger alignment if needed.
-      if (MFI.getObjectAlignment(FI) < Align)
-        MFI.setObjectAlignment(FI, Align);
+      if (MFI.getObjectAlign(FI) < Alignment)
+        MFI.setObjectAlignment(FI, Alignment);
     }
   }
@@ -1067,10 +1067,9 @@ bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst, Register Val
   return true;
 }
 bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
                                     Register Src, unsigned KnownLen,
-                                    unsigned DstAlign, unsigned SrcAlign,
+                                    Align DstAlign, Align SrcAlign,
                                     bool IsVolatile) {
   auto &MF = *MI.getParent()->getParent();
   const auto &TLI = *MF.getSubtarget().getTargetLowering();
@@ -1082,7 +1081,7 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
   bool DstAlignCanChange = false;
   MachineFrameInfo &MFI = MF.getFrameInfo();
   bool OptSize = shouldLowerMemFuncForSize(MF);
-  unsigned Alignment = MinAlign(DstAlign, SrcAlign);
+  Align Alignment = commonAlignment(DstAlign, SrcAlign);
   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -1111,21 +1110,20 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
   if (DstAlignCanChange) {
     // Get an estimate of the type from the LLT.
     Type *IRTy = getTypeForLLT(MemOps[0], C);
-    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
+    Align NewAlign = DL.getABITypeAlign(IRTy);
     // Don't promote to an alignment that would require dynamic stack
     // realignment.
     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
     if (!TRI->needsStackRealignment(MF))
-      while (NewAlign > Alignment &&
-             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
-        NewAlign /= 2;
+      while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
+        NewAlign = NewAlign / 2;
     if (NewAlign > Alignment) {
       Alignment = NewAlign;
       unsigned FI = FIDef->getOperand(1).getIndex();
       // Give the stack frame object a larger alignment if needed.
-      if (MFI.getObjectAlignment(FI) < Alignment)
+      if (MFI.getObjectAlign(FI) < Alignment)
         MFI.setObjectAlignment(FI, Alignment);
     }
   }
@@ -1176,9 +1174,9 @@ bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
 }
 bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
                                      Register Src, unsigned KnownLen,
-                                     unsigned DstAlign, unsigned SrcAlign,
+                                     Align DstAlign, Align SrcAlign,
                                      bool IsVolatile) {
   auto &MF = *MI.getParent()->getParent();
   const auto &TLI = *MF.getSubtarget().getTargetLowering();
   auto &DL = MF.getDataLayout();
@@ -1189,7 +1187,7 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
   bool DstAlignCanChange = false;
   MachineFrameInfo &MFI = MF.getFrameInfo();
   bool OptSize = shouldLowerMemFuncForSize(MF);
-  unsigned Alignment = MinAlign(DstAlign, SrcAlign);
+  Align Alignment = commonAlignment(DstAlign, SrcAlign);
   MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
   if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
@@ -1217,21 +1215,20 @@ bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
   if (DstAlignCanChange) {
     // Get an estimate of the type from the LLT.
     Type *IRTy = getTypeForLLT(MemOps[0], C);
-    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(IRTy);
+    Align NewAlign = DL.getABITypeAlign(IRTy);
     // Don't promote to an alignment that would require dynamic stack
     // realignment.
     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
     if (!TRI->needsStackRealignment(MF))
-      while (NewAlign > Alignment &&
-             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
-        NewAlign /= 2;
+      while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
+        NewAlign = NewAlign / 2;
     if (NewAlign > Alignment) {
       Alignment = NewAlign;
       unsigned FI = FIDef->getOperand(1).getIndex();
       // Give the stack frame object a larger alignment if needed.
-      if (MFI.getObjectAlignment(FI) < Alignment)
+      if (MFI.getObjectAlign(FI) < Alignment)
         MFI.setObjectAlignment(FI, Alignment);
     }
   }
@@ -1297,8 +1294,8 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
   if (IsVolatile)
     return false;
-  unsigned DstAlign = MemOp->getBaseAlignment();
-  unsigned SrcAlign = 0;
+  Align DstAlign(MemOp->getBaseAlignment());
+  Align SrcAlign;
   Register Dst = MI.getOperand(1).getReg();
   Register Src = MI.getOperand(2).getReg();
   Register Len = MI.getOperand(3).getReg();
@@ -1306,7 +1303,7 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
   if (ID != Intrinsic::memset) {
     assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
     MemOp = *(++MMOIt);
-    SrcAlign = MemOp->getBaseAlignment();
+    SrcAlign = Align(MemOp->getBaseAlignment());
   }
   // See if this is a constant length copy
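
Condensed, the alignment handling these GlobalISel helpers now share looks like the following sketch; names match the diff, but the free-standing wrapper and the omission of the needsStackRealignment check are simplifications for illustration:

// Sketch: compute the working alignment and the promoted value the combiner
// would use when the destination's alignment is allowed to change.
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Alignment.h"
#include <algorithm>

llvm::Align pickAlignment(const llvm::DataLayout &DL, llvm::Align DstAlign,
                          llvm::Align SrcAlign, llvm::Type *IRTy) {
  // Largest alignment both operands guarantee (replaces MinAlign on unsigned).
  llvm::Align Alignment = llvm::commonAlignment(DstAlign, SrcAlign);
  // Prefer the type's ABI alignment, but stay within the natural stack
  // alignment (mirrors the halving loop in the diff).
  llvm::Align NewAlign = DL.getABITypeAlign(IRTy);
  while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
    NewAlign = NewAlign / 2;
  return std::max(Alignment, NewAlign);
}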

View File

@@ -5877,7 +5877,7 @@ static void chainLoadsAndStoresForMemcpy(SelectionDAG &DAG, const SDLoc &dl,
 static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                        SDValue Chain, SDValue Dst, SDValue Src,
-                                       uint64_t Size, unsigned Alignment,
+                                       uint64_t Size, Align Alignment,
                                        bool isVol, bool AlwaysInline,
                                        MachinePointerInfo DstPtrInfo,
                                        MachinePointerInfo SrcPtrInfo) {
@@ -5901,9 +5901,10 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
     DstAlignCanChange = true;
-  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
-  if (Alignment > SrcAlign)
+  MaybeAlign SrcAlign(DAG.InferPtrAlignment(Src));
+  if (!SrcAlign || Alignment > *SrcAlign)
     SrcAlign = Alignment;
+  assert(SrcAlign && "SrcAlign must be set");
   ConstantDataArraySlice Slice;
   bool CopyFromConstant = isMemSrcFromConstant(Src, Slice);
   bool isZeroConstant = CopyFromConstant && Slice.Array == nullptr;
@@ -5912,7 +5913,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
           ? MemOp::Set(Size, DstAlignCanChange, Alignment,
                        /*IsZeroMemset*/ true, isVol)
           : MemOp::Copy(Size, DstAlignCanChange, Alignment,
-                        SrcAlign, isVol, CopyFromConstant);
+                        *SrcAlign, isVol, CopyFromConstant);
   if (!TLI.findOptimalMemOpLowering(
           MemOps, Limit, Op, DstPtrInfo.getAddrSpace(),
           SrcPtrInfo.getAddrSpace(), MF.getFunction().getAttributes()))
@@ -5920,19 +5921,18 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
   if (DstAlignCanChange) {
     Type *Ty = MemOps[0].getTypeForEVT(C);
-    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
+    Align NewAlign = DL.getABITypeAlign(Ty);
     // Don't promote to an alignment that would require dynamic stack
     // realignment.
     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
     if (!TRI->needsStackRealignment(MF))
-      while (NewAlign > Alignment &&
-             DL.exceedsNaturalStackAlignment(Align(NewAlign)))
-        NewAlign /= 2;
+      while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
+        NewAlign = NewAlign / 2;
     if (NewAlign > Alignment) {
       // Give the stack frame object a larger alignment if needed.
-      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
+      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
       Alignment = NewAlign;
     }
@@ -5979,7 +5979,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
     if (Value.getNode()) {
       Store = DAG.getStore(
           Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
-          DstPtrInfo.getWithOffset(DstOff), Alignment, MMOFlags);
+          DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags);
       OutChains.push_back(Store);
     }
   }
@@ -6002,12 +6002,13 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
     Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
                            DAG.getMemBasePlusOffset(Src, SrcOff, dl),
                            SrcPtrInfo.getWithOffset(SrcOff), VT,
-                           MinAlign(SrcAlign, SrcOff), SrcMMOFlags);
+                           commonAlignment(*SrcAlign, SrcOff).value(),
+                           SrcMMOFlags);
     OutLoadChains.push_back(Value.getValue(1));
     Store = DAG.getTruncStore(
         Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
-        DstPtrInfo.getWithOffset(DstOff), VT, Alignment, MMOFlags);
+        DstPtrInfo.getWithOffset(DstOff), VT, Alignment.value(), MMOFlags);
     OutStoreChains.push_back(Store);
   }
   SrcOff += VTSize;
@@ -6063,7 +6064,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
 static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
                                         SDValue Chain, SDValue Dst, SDValue Src,
-                                        uint64_t Size, unsigned Align,
+                                        uint64_t Size, Align Alignment,
                                         bool isVol, bool AlwaysInline,
                                         MachinePointerInfo DstPtrInfo,
                                         MachinePointerInfo SrcPtrInfo) {
@@ -6085,13 +6086,14 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
   if (FI && !MFI.isFixedObjectIndex(FI->getIndex()))
     DstAlignCanChange = true;
-  unsigned SrcAlign = DAG.InferPtrAlignment(Src);
-  if (Align > SrcAlign)
-    SrcAlign = Align;
+  MaybeAlign SrcAlign(DAG.InferPtrAlignment(Src));
+  if (!SrcAlign || Alignment > *SrcAlign)
+    SrcAlign = Alignment;
+  assert(SrcAlign && "SrcAlign must be set");
   unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
   if (!TLI.findOptimalMemOpLowering(
           MemOps, Limit,
-          MemOp::Copy(Size, DstAlignCanChange, Align, SrcAlign,
+          MemOp::Copy(Size, DstAlignCanChange, Alignment, *SrcAlign,
                       /*IsVolatile*/ true),
          DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
          MF.getFunction().getAttributes()))
@@ -6099,12 +6101,12 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
   if (DstAlignCanChange) {
     Type *Ty = MemOps[0].getTypeForEVT(C);
-    unsigned NewAlign = (unsigned)DL.getABITypeAlignment(Ty);
-    if (NewAlign > Align) {
+    Align NewAlign = DL.getABITypeAlign(Ty);
+    if (NewAlign > Alignment) {
       // Give the stack frame object a larger alignment if needed.
-      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
+      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
-      Align = NewAlign;
+      Alignment = NewAlign;
     }
   }
@@ -6126,9 +6128,9 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
     if (isDereferenceable)
       SrcMMOFlags |= MachineMemOperand::MODereferenceable;
-    Value =
-        DAG.getLoad(VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
-                    SrcPtrInfo.getWithOffset(SrcOff), SrcAlign, SrcMMOFlags);
+    Value = DAG.getLoad(
+        VT, dl, Chain, DAG.getMemBasePlusOffset(Src, SrcOff, dl),
+        SrcPtrInfo.getWithOffset(SrcOff), SrcAlign->value(), SrcMMOFlags);
     LoadValues.push_back(Value);
     LoadChains.push_back(Value.getValue(1));
     SrcOff += VTSize;
@@ -6140,9 +6142,9 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
     unsigned VTSize = VT.getSizeInBits() / 8;
     SDValue Store;
-    Store = DAG.getStore(Chain, dl, LoadValues[i],
-                         DAG.getMemBasePlusOffset(Dst, DstOff, dl),
-                         DstPtrInfo.getWithOffset(DstOff), Align, MMOFlags);
+    Store = DAG.getStore(
+        Chain, dl, LoadValues[i], DAG.getMemBasePlusOffset(Dst, DstOff, dl),
+        DstPtrInfo.getWithOffset(DstOff), Alignment.value(), MMOFlags);
     OutChains.push_back(Store);
     DstOff += VTSize;
   }
@@ -6159,7 +6161,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
 /// \param Dst Pointer to destination memory location.
 /// \param Src Value of byte to write into the memory.
 /// \param Size Number of bytes to write.
-/// \param Align Alignment of the destination in bytes.
+/// \param Alignment Alignment of the destination in bytes.
 /// \param isVol True if destination is volatile.
 /// \param DstPtrInfo IR information on the memory pointer.
 /// \returns New head in the control flow, if lowering was successful, empty
@@ -6170,7 +6172,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, const SDLoc &dl,
 /// memory size.
 static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
                                SDValue Chain, SDValue Dst, SDValue Src,
-                               uint64_t Size, unsigned Align, bool isVol,
+                               uint64_t Size, Align Alignment, bool isVol,
                                MachinePointerInfo DstPtrInfo) {
   // Turn a memset of undef to nop.
   // FIXME: We need to honor volatile even is Src is undef.
@@ -6192,18 +6194,18 @@ static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
       isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
   if (!TLI.findOptimalMemOpLowering(
           MemOps, TLI.getMaxStoresPerMemset(OptSize),
-          MemOp::Set(Size, DstAlignCanChange, Align, IsZeroVal, isVol),
+          MemOp::Set(Size, DstAlignCanChange, Alignment, IsZeroVal, isVol),
          DstPtrInfo.getAddrSpace(), ~0u, MF.getFunction().getAttributes()))
     return SDValue();
   if (DstAlignCanChange) {
     Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
-    unsigned NewAlign = (unsigned)DAG.getDataLayout().getABITypeAlignment(Ty);
-    if (NewAlign > Align) {
+    Align NewAlign = DAG.getDataLayout().getABITypeAlign(Ty);
+    if (NewAlign > Alignment) {
       // Give the stack frame object a larger alignment if needed.
-      if (MFI.getObjectAlignment(FI->getIndex()) < NewAlign)
+      if (MFI.getObjectAlign(FI->getIndex()) < NewAlign)
         MFI.setObjectAlignment(FI->getIndex(), NewAlign);
-      Align = NewAlign;
+      Alignment = NewAlign;
     }
   }
@@ -6241,7 +6243,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, const SDLoc &dl,
     assert(Value.getValueType() == VT && "Value with wrong type.");
     SDValue Store = DAG.getStore(
         Chain, dl, Value, DAG.getMemBasePlusOffset(Dst, DstOff, dl),
-        DstPtrInfo.getWithOffset(DstOff), Align,
+        DstPtrInfo.getWithOffset(DstOff), Alignment.value(),
        isVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone);
     OutChains.push_back(Store);
     DstOff += VT.getSizeInBits() / 8;
@@ -6262,11 +6264,12 @@ static void checkAddrSpaceIsValidForLibcall(const TargetLowering *TLI,
 }
 SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
-                                SDValue Src, SDValue Size, unsigned Align,
+                                SDValue Src, SDValue Size, unsigned Alignment,
                                 bool isVol, bool AlwaysInline, bool isTailCall,
                                 MachinePointerInfo DstPtrInfo,
                                 MachinePointerInfo SrcPtrInfo) {
-  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
+  assert(Alignment &&
+         "The SDAG layer expects explicit alignment and reserves 0");
   // Check to see if we should lower the memcpy to loads and stores first.
   // For cases within the target-specified limits, this is the best choice.
@@ -6276,9 +6279,9 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
     if (ConstantSize->isNullValue())
       return Chain;
-    SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
-                                             ConstantSize->getZExtValue(),Align,
-                                             isVol, false, DstPtrInfo, SrcPtrInfo);
+    SDValue Result = getMemcpyLoadsAndStores(
+        *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
+        Align(Alignment), isVol, false, DstPtrInfo, SrcPtrInfo);
     if (Result.getNode())
       return Result;
   }
@@ -6287,7 +6290,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
   // code. If the target chooses to do this, this is the next best.
   if (TSI) {
     SDValue Result = TSI->EmitTargetCodeForMemcpy(
-        *this, dl, Chain, Dst, Src, Size, Align, isVol, AlwaysInline,
+        *this, dl, Chain, Dst, Src, Size, Alignment, isVol, AlwaysInline,
         DstPtrInfo, SrcPtrInfo);
     if (Result.getNode())
       return Result;
@@ -6297,9 +6300,9 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, const SDLoc &dl, SDValue Dst,
   // use a (potentially long) sequence of loads and stores.
   if (AlwaysInline) {
     assert(ConstantSize && "AlwaysInline requires a constant size!");
-    return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
-                                   ConstantSize->getZExtValue(), Align, isVol,
-                                   true, DstPtrInfo, SrcPtrInfo);
+    return getMemcpyLoadsAndStores(
+        *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
+        Align(Alignment), isVol, true, DstPtrInfo, SrcPtrInfo);
   }
   checkAddrSpaceIsValidForLibcall(TLI, DstPtrInfo.getAddrSpace());
@@ -6378,11 +6381,12 @@ SDValue SelectionDAG::getAtomicMemcpy(SDValue Chain, const SDLoc &dl,
 }
 SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
-                                 SDValue Src, SDValue Size, unsigned Align,
+                                 SDValue Src, SDValue Size, unsigned Alignment,
                                  bool isVol, bool isTailCall,
                                  MachinePointerInfo DstPtrInfo,
                                  MachinePointerInfo SrcPtrInfo) {
-  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
+  assert(Alignment &&
+         "The SDAG layer expects explicit alignment and reserves 0");
   // Check to see if we should lower the memmove to loads and stores first.
   // For cases within the target-specified limits, this is the best choice.
@@ -6392,10 +6396,9 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
     if (ConstantSize->isNullValue())
       return Chain;
-    SDValue Result =
-        getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
-                                 ConstantSize->getZExtValue(), Align, isVol,
-                                 false, DstPtrInfo, SrcPtrInfo);
+    SDValue Result = getMemmoveLoadsAndStores(
+        *this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
+        Align(Alignment), isVol, false, DstPtrInfo, SrcPtrInfo);
     if (Result.getNode())
       return Result;
   }
@@ -6403,8 +6406,9 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, const SDLoc &dl, SDValue Dst,
   // Then check to see if we should lower the memmove with target-specific
   // code. If the target chooses to do this, this is the next best.
   if (TSI) {
-    SDValue Result = TSI->EmitTargetCodeForMemmove(
-        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo, SrcPtrInfo);
+    SDValue Result =
+        TSI->EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size,
+                                      Alignment, isVol, DstPtrInfo, SrcPtrInfo);
     if (Result.getNode())
       return Result;
   }
@@ -6482,10 +6486,11 @@ SDValue SelectionDAG::getAtomicMemmove(SDValue Chain, const SDLoc &dl,
 }
 SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
-                                SDValue Src, SDValue Size, unsigned Align,
+                                SDValue Src, SDValue Size, unsigned Alignment,
                                 bool isVol, bool isTailCall,
                                 MachinePointerInfo DstPtrInfo) {
-  assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
+  assert(Alignment &&
+         "The SDAG layer expects explicit alignment and reserves 0");
   // Check to see if we should lower the memset to stores first.
   // For cases within the target-specified limits, this is the best choice.
@@ -6495,9 +6500,9 @@ SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
     if (ConstantSize->isNullValue())
       return Chain;
-    SDValue Result =
-        getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
-                        Align, isVol, DstPtrInfo);
+    SDValue Result = getMemsetStores(*this, dl, Chain, Dst, Src,
+                                     ConstantSize->getZExtValue(),
+                                     Align(Alignment), isVol, DstPtrInfo);
     if (Result.getNode())
       return Result;
@@ -6507,7 +6512,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, const SDLoc &dl, SDValue Dst,
   // code. If the target chooses to do this, this is the next best.
   if (TSI) {
     SDValue Result = TSI->EmitTargetCodeForMemset(
-        *this, dl, Chain, Dst, Src, Size, Align, isVol, DstPtrInfo);
+        *this, dl, Chain, Dst, Src, Size, Alignment, isVol, DstPtrInfo);
     if (Result.getNode())
       return Result;
   }
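
The SelectionDAG helpers above all resolve the inferred source alignment the same way; a minimal sketch of that pattern (the standalone function resolveSrcAlign is hypothetical, not part of the patch):

// Sketch: fold an inferred (possibly unknown) source alignment into a known
// Align, falling back to the explicit destination alignment.
#include "llvm/Support/Alignment.h"
#include <cassert>

llvm::Align resolveSrcAlign(llvm::MaybeAlign Inferred, llvm::Align Alignment) {
  if (!Inferred || Alignment > *Inferred)
    Inferred = Alignment;             // use the explicit alignment instead
  assert(Inferred && "SrcAlign must be set");
  return *Inferred;
}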

View File

@@ -566,8 +566,8 @@ int ARMTTIImpl::getMemcpyCost(const Instruction *I) {
     return LibCallCost;
   const unsigned Size = C->getValue().getZExtValue();
-  const unsigned DstAlign = MI->getDestAlignment();
-  const unsigned SrcAlign = MI->getSourceAlignment();
+  const Align DstAlign = *MI->getDestAlign();
+  const Align SrcAlign = *MI->getSourceAlign();
   const Function *F = I->getParent()->getParent();
   const unsigned Limit = TLI->getMaxStoresPerMemmove(F->hasMinSize());
   std::vector<EVT> MemOps;