[TargetLowering] Add allowsMemoryAccess(MachineMemOperand) helper wrapper. NFCI.
As suggested by @arsenm on D63075, this adds a TargetLowering::allowsMemoryAccess wrapper that takes a load/store node's MachineMemOperand, deriving the AddressSpace/Alignment arguments from it, and that will also implicitly handle the MachineMemOperand::Flags change in D63075.

llvm-svn: 363048
commit 266f43964e
parent be20daa8eb
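For illustration, here is a minimal before/after sketch of the call-site pattern this wrapper enables. It is adapted from the DAGCombiner::MatchLoadCombine change below; the surrounding variables (TLI, DAG, VT, FirstLoad) are assumed to be the ones already in scope in that combine:

    // Before: the caller pulls the address space and alignment out of the node.
    bool Fast = false;
    bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
                                          VT, FirstLoad->getAddressSpace(),
                                          FirstLoad->getAlignment(), &Fast);

    // After: the caller hands over the node's MachineMemOperand and the wrapper
    // extracts those arguments itself (and can later pick up the Flags from
    // D63075 without touching the call sites again).
    bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
                                          VT, *FirstLoad->getMemOperand(), &Fast);
    if (!Allowed || !Fast)
      return SDValue();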
@@ -1430,6 +1430,14 @@ public:
                           unsigned AddrSpace = 0, unsigned Alignment = 1,
                           bool *Fast = nullptr) const;
 
+  /// Return true if the target supports a memory access of this type for the
+  /// given MachineMemOperand. If the access is allowed, the optional
+  /// final parameter returns if the access is also fast (as defined by the
+  /// target).
+  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
+                          const MachineMemOperand &MMO,
+                          bool *Fast = nullptr) const;
+
   /// Returns the target specific optimal type for load and store operations as
   /// a result of memset, memcpy, and memmove lowering.
   ///

@@ -6439,9 +6439,9 @@ SDValue DAGCombiner::MatchStoreCombine(StoreSDNode *N) {
 
   // Check that a store of the wide type is both allowed and fast on the target
   bool Fast = false;
-  bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
-                                        VT, FirstStore->getAddressSpace(),
-                                        FirstStore->getAlignment(), &Fast);
+  bool Allowed =
+      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+                             *FirstStore->getMemOperand(), &Fast);
   if (!Allowed || !Fast)
     return SDValue();
 
@@ -6604,8 +6604,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
   // Check that a load of the wide type is both allowed and fast on the target
   bool Fast = false;
   bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
-                                        VT, FirstLoad->getAddressSpace(),
-                                        FirstLoad->getAlignment(), &Fast);
+                                        VT, *FirstLoad->getMemOperand(), &Fast);
   if (!Allowed || !Fast)
     return SDValue();
 
@@ -10828,15 +10827,14 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
         TLI.isOperationLegal(ISD::LOAD, VT)) &&
        TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) {
     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
-    unsigned OrigAlign = LN0->getAlignment();
 
     bool Fast = false;
     if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
-                               LN0->getAddressSpace(), OrigAlign, &Fast) &&
+                               *LN0->getMemOperand(), &Fast) &&
         Fast) {
       SDValue Load =
           DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
-                      LN0->getPointerInfo(), OrigAlign,
+                      LN0->getPointerInfo(), LN0->getAlignment(),
                       LN0->getMemOperand()->getFlags(), LN0->getAAInfo());
       DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
       return Load;
@@ -15439,8 +15437,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
 
         if (TLI.isTypeLegal(StoreTy) &&
             TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
-            TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
-                                   FirstStoreAlign, &IsFast) &&
+            TLI.allowsMemoryAccess(Context, DL, StoreTy,
+                                   *FirstInChain->getMemOperand(), &IsFast) &&
             IsFast) {
           LastIntegerTrunc = false;
           LastLegalType = i + 1;
@@ -15451,8 +15449,9 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
               TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
           if (TLI.isTruncStoreLegal(LegalizedStoredValTy, StoreTy) &&
               TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValTy, DAG) &&
-              TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
-                                     FirstStoreAlign, &IsFast) &&
+              TLI.allowsMemoryAccess(Context, DL, StoreTy,
+                                     *FirstInChain->getMemOperand(),
+                                     &IsFast) &&
               IsFast) {
             LastIntegerTrunc = true;
             LastLegalType = i + 1;
@@ -15470,8 +15469,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
           EVT Ty = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
           if (TLI.isTypeLegal(Ty) && TLI.isTypeLegal(MemVT) &&
               TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
-              TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
-                                     FirstStoreAlign, &IsFast) &&
+              TLI.allowsMemoryAccess(
+                  Context, DL, Ty, *FirstInChain->getMemOperand(), &IsFast) &&
               IsFast)
             LastLegalVectorType = i + 1;
         }
@@ -15542,8 +15541,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
 
         if (TLI.isTypeLegal(Ty) &&
             TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
-            TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
-                                   FirstStoreAlign, &IsFast) &&
+            TLI.allowsMemoryAccess(Context, DL, Ty,
+                                   *FirstInChain->getMemOperand(), &IsFast) &&
            IsFast)
           NumStoresToMerge = i + 1;
       }
@@ -15634,7 +15633,6 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
     unsigned FirstStoreAS = FirstInChain->getAddressSpace();
     unsigned FirstStoreAlign = FirstInChain->getAlignment();
     LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode);
-    unsigned FirstLoadAS = FirstLoad->getAddressSpace();
     unsigned FirstLoadAlign = FirstLoad->getAlignment();
 
     // Scan the memory operations on the chain and find the first
@@ -15674,11 +15672,11 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
       bool IsFastSt, IsFastLd;
       if (TLI.isTypeLegal(StoreTy) &&
           TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
-          TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
-                                 FirstStoreAlign, &IsFastSt) &&
+          TLI.allowsMemoryAccess(Context, DL, StoreTy,
+                                 *FirstInChain->getMemOperand(), &IsFastSt) &&
           IsFastSt &&
-          TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
-                                 FirstLoadAlign, &IsFastLd) &&
+          TLI.allowsMemoryAccess(Context, DL, StoreTy,
+                                 *FirstLoad->getMemOperand(), &IsFastLd) &&
           IsFastLd) {
         LastLegalVectorType = i + 1;
       }
@@ -15688,11 +15686,11 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
       StoreTy = EVT::getIntegerVT(Context, SizeInBits);
       if (TLI.isTypeLegal(StoreTy) &&
           TLI.canMergeStoresTo(FirstStoreAS, StoreTy, DAG) &&
-          TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
-                                 FirstStoreAlign, &IsFastSt) &&
+          TLI.allowsMemoryAccess(Context, DL, StoreTy,
+                                 *FirstInChain->getMemOperand(), &IsFastSt) &&
          IsFastSt &&
-          TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
-                                 FirstLoadAlign, &IsFastLd) &&
+          TLI.allowsMemoryAccess(Context, DL, StoreTy,
+                                 *FirstLoad->getMemOperand(), &IsFastLd) &&
          IsFastLd) {
        LastLegalIntegerType = i + 1;
        DoIntegerTruncate = false;
@@ -15707,11 +15705,12 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
           TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValTy,
                              StoreTy) &&
           TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValTy, StoreTy) &&
-          TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
-                                 FirstStoreAlign, &IsFastSt) &&
+          TLI.allowsMemoryAccess(Context, DL, StoreTy,
+                                 *FirstInChain->getMemOperand(),
+                                 &IsFastSt) &&
           IsFastSt &&
-          TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
-                                 FirstLoadAlign, &IsFastLd) &&
+          TLI.allowsMemoryAccess(Context, DL, StoreTy,
+                                 *FirstLoad->getMemOperand(), &IsFastLd) &&
           IsFastLd) {
         LastLegalIntegerType = i + 1;
         DoIntegerTruncate = true;
@@ -15962,13 +15961,12 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
     if (((!LegalOperations && !ST->isVolatile()) ||
          TLI.isOperationLegal(ISD::STORE, SVT)) &&
         TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT)) {
-      unsigned OrigAlign = ST->getAlignment();
       bool Fast = false;
       if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), SVT,
-                                 ST->getAddressSpace(), OrigAlign, &Fast) &&
+                                 *ST->getMemOperand(), &Fast) &&
           Fast) {
         return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr,
-                            ST->getPointerInfo(), OrigAlign,
+                            ST->getPointerInfo(), ST->getAlignment(),
                             ST->getMemOperand()->getFlags(), ST->getAAInfo());
       }
     }

@@ -492,10 +492,9 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
       // If this is an unaligned store and the target doesn't support it,
       // expand it.
       EVT MemVT = ST->getMemoryVT();
-      unsigned AS = ST->getAddressSpace();
-      unsigned Align = ST->getAlignment();
       const DataLayout &DL = DAG.getDataLayout();
-      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align)) {
+      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
+                                  *ST->getMemOperand())) {
         LLVM_DEBUG(dbgs() << "Expanding unsupported unaligned store\n");
         SDValue Result = TLI.expandUnalignedStore(ST, DAG);
         ReplaceNode(SDValue(ST, 0), Result);
@@ -607,11 +606,10 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
       default: llvm_unreachable("This action is not supported yet!");
       case TargetLowering::Legal: {
         EVT MemVT = ST->getMemoryVT();
-        unsigned AS = ST->getAddressSpace();
-        unsigned Align = ST->getAlignment();
         // If this is an unaligned store and the target doesn't support it,
         // expand it.
-        if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align)) {
+        if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
+                                    *ST->getMemOperand())) {
           SDValue Result = TLI.expandUnalignedStore(ST, DAG);
           ReplaceNode(SDValue(ST, 0), Result);
         }
@@ -668,13 +666,12 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
     default: llvm_unreachable("This action is not supported yet!");
     case TargetLowering::Legal: {
       EVT MemVT = LD->getMemoryVT();
-      unsigned AS = LD->getAddressSpace();
-      unsigned Align = LD->getAlignment();
       const DataLayout &DL = DAG.getDataLayout();
       // If this is an unaligned load and the target doesn't support it,
       // expand it.
-      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align)) {
-        std::tie(RVal, RChain) = TLI.expandUnalignedLoad(LD, DAG);
+      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
+                                  *LD->getMemOperand())) {
+        std::tie(RVal, RChain) = TLI.expandUnalignedLoad(LD, DAG);
       }
       break;
     }
@@ -860,10 +857,9 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
       // If this is an unaligned load and the target doesn't support it,
       // expand it.
       EVT MemVT = LD->getMemoryVT();
-      unsigned AS = LD->getAddressSpace();
-      unsigned Align = LD->getAlignment();
       const DataLayout &DL = DAG.getDataLayout();
-      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align)) {
+      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
+                                  *LD->getMemOperand())) {
        std::tie(Value, Chain) = TLI.expandUnalignedLoad(LD, DAG);
      }
    }

@@ -1482,6 +1482,14 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
   return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
 }
 
+bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
+                                            const DataLayout &DL, EVT VT,
+                                            const MachineMemOperand &MMO,
+                                            bool *Fast) const {
+  return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
+                            MMO.getAlignment(), Fast);
+}
+
 BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
   return BranchProbability(MinPercentageForPredictableBranch, 100);
 }

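The wrapper above simply forwards the operand's address space and alignment to the existing overload, which in turn falls back on allowsMisalignedMemoryAccesses. As a rough sketch (not part of this patch), a target that wants the Fast out-parameter to be meaningful would override that hook along these lines; the target class and cost model here are purely hypothetical:

    // Hypothetical target hook: every access is legal, but only accesses
    // aligned to at least 4 bytes are reported as fast.
    bool MyTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                          unsigned AddrSpace,
                                                          unsigned Align,
                                                          bool *Fast) const {
      if (Fast)
        *Fast = Align >= 4;
      return true;
    }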
@@ -6769,14 +6769,15 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
          "Custom lowering for non-i32 vectors hasn't been implemented.");
 
-  unsigned Alignment = Load->getAlignment();
-  unsigned AS = Load->getAddressSpace();
   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
-                          AS, Alignment)) {
+                          *Load->getMemOperand())) {
     SDValue Ops[2];
     std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
     return DAG.getMergeValues(Ops, DL);
   }
+
+  unsigned Alignment = Load->getAlignment();
+  unsigned AS = Load->getAddressSpace();
   if (Subtarget->hasLDSMisalignedBug() &&
       AS == AMDGPUAS::FLAT_ADDRESS &&
       Alignment < MemVT.getStoreSize() && MemVT.getSizeInBits() > 32) {
@@ -7237,12 +7238,12 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   assert(VT.isVector() &&
          Store->getValue().getValueType().getScalarType() == MVT::i32);
 
-  unsigned AS = Store->getAddressSpace();
   if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
-                          AS, Store->getAlignment())) {
+                          *Store->getMemOperand())) {
     return expandUnalignedStore(Store, DAG);
   }
 
+  unsigned AS = Store->getAddressSpace();
   if (Subtarget->hasLDSMisalignedBug() &&
       AS == AMDGPUAS::FLAT_ADDRESS &&
       Store->getAlignment() < VT.getStoreSize() && VT.getSizeInBits() > 32) {

@@ -2620,7 +2620,6 @@ HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
   const SDLoc &dl(Op);
   const DataLayout &DL = DAG.getDataLayout();
   LLVMContext &Ctx = *DAG.getContext();
-  unsigned AS = LN->getAddressSpace();
 
   // If the load aligning is disabled or the load can be broken up into two
   // smaller legal loads, do the default (target-independent) expansion.
@@ -2630,15 +2629,15 @@ HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
     DoDefault = true;
 
   if (!AlignLoads) {
-    if (allowsMemoryAccess(Ctx, DL, LN->getMemoryVT(), AS, HaveAlign))
+    if (allowsMemoryAccess(Ctx, DL, LN->getMemoryVT(), *LN->getMemOperand()))
       return Op;
     DoDefault = true;
   }
-  if (!DoDefault && 2*HaveAlign == NeedAlign) {
+  if (!DoDefault && (2 * HaveAlign) == NeedAlign) {
     // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
-    MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8*HaveAlign)
+    MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
                                 : MVT::getVectorVT(MVT::i8, HaveAlign);
-    DoDefault = allowsMemoryAccess(Ctx, DL, PartTy, AS, HaveAlign);
+    DoDefault = allowsMemoryAccess(Ctx, DL, PartTy, *LN->getMemOperand());
   }
   if (DoDefault) {
     std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);

@@ -2231,7 +2231,7 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
     LoadSDNode *Load = cast<LoadSDNode>(Op);
     EVT MemVT = Load->getMemoryVT();
     if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
-                            Load->getAddressSpace(), Load->getAlignment())) {
+                            *Load->getMemOperand())) {
       SDValue Ops[2];
       std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
       return DAG.getMergeValues(Ops, SDLoc(Op));
@@ -2274,7 +2274,7 @@ SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   // stores and have to handle it here.
   if (VT == MVT::v2f16 &&
       !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
-                          Store->getAddressSpace(), Store->getAlignment()))
+                          *Store->getMemOperand()))
     return expandUnalignedStore(Store, DAG);
 
   if (VT.isVector())

@@ -38976,13 +38976,12 @@ static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
   // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
   ISD::LoadExtType Ext = Ld->getExtensionType();
   bool Fast;
-  unsigned AddressSpace = Ld->getAddressSpace();
   unsigned Alignment = Ld->getAlignment();
   if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
       Ext == ISD::NON_EXTLOAD &&
       ((Ld->isNonTemporal() && !Subtarget.hasInt256() && Alignment >= 16) ||
        (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
-                               AddressSpace, Alignment, &Fast) && !Fast))) {
+                               *Ld->getMemOperand(), &Fast) && !Fast))) {
     unsigned NumElems = RegVT.getVectorNumElements();
     if (NumElems < 2)
       return SDValue();
@@ -39492,11 +39491,9 @@ static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
   // If we are saving a concatenation of two XMM registers and 32-byte stores
   // are slow, such as on Sandy Bridge, perform two 16-byte stores.
   bool Fast;
-  unsigned AddressSpace = St->getAddressSpace();
-  unsigned Alignment = St->getAlignment();
   if (VT.is256BitVector() && StVT == VT &&
       TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
-                             AddressSpace, Alignment, &Fast) &&
+                             *St->getMemOperand(), &Fast) &&
       !Fast) {
     unsigned NumElems = VT.getVectorNumElements();
     if (NumElems < 2)
@@ -42990,11 +42987,9 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
   // If needed, look through bitcasts to get to the load.
   if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
     bool Fast;
-    unsigned Alignment = FirstLd->getAlignment();
-    unsigned AS = FirstLd->getAddressSpace();
     const X86TargetLowering *TLI = Subtarget.getTargetLowering();
-    if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, AS,
-                                Alignment, &Fast) &&
+    if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
+                                *FirstLd->getMemOperand(), &Fast) &&
         Fast) {
       if (SDValue Ld =
               EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))