forked from OSchip/llvm-project
[TargetLowering][AMDGPU] Make scalarizeVectorLoad return a pair of SDValues instead of creating a MERGE_VALUES node. NFCI
This allows us to clean up several places that previously had to peek through the MERGE_VALUES node after the call; returning the SDValues directly makes that unnecessary. Unfortunately, several call sites in AMDGPU actually wanted the MERGE_VALUES node and now need to create it themselves.
This commit is contained in:
parent
831898ff8a
commit
787e078f3e
|
@ -4136,8 +4136,9 @@ public:
|
|||
|
||||
/// Turn load of vector type into a load of the individual elements.
|
||||
/// \param LD load to expand
|
||||
/// \returns MERGE_VALUEs of the scalar loads with their chains.
|
||||
SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const;
|
||||
/// \returns BUILD_VECTOR and TokenFactor nodes.
|
||||
std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
|
||||
SelectionDAG &DAG) const;
|
||||
|
||||
// Turn a store of a vector type into stores of the individual elements.
|
||||
/// \param ST Store with a vector value type
|
||||
|
|
|
@ -752,15 +752,7 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
|
|||
NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
|
||||
Value = DAG.getBuildVector(Op.getNode()->getValueType(0), dl, Vals);
|
||||
} else {
|
||||
SDValue Scalarized = TLI.scalarizeVectorLoad(LD, DAG);
|
||||
// Skip past MERGE_VALUE node if known.
|
||||
if (Scalarized->getOpcode() == ISD::MERGE_VALUES) {
|
||||
NewChain = Scalarized.getOperand(1);
|
||||
Value = Scalarized.getOperand(0);
|
||||
} else {
|
||||
NewChain = Scalarized.getValue(1);
|
||||
Value = Scalarized.getValue(0);
|
||||
}
|
||||
std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG);
|
||||
}
|
||||
|
||||
AddLegalizedOperand(Op.getValue(0), Value);
|
||||
|
|
|
@ -6497,8 +6497,9 @@ bool TargetLowering::expandABS(SDNode *N, SDValue &Result,
|
|||
return true;
|
||||
}
|
||||
|
||||
SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
|
||||
SelectionDAG &DAG) const {
|
||||
std::pair<SDValue, SDValue>
|
||||
TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
|
||||
SelectionDAG &DAG) const {
|
||||
SDLoc SL(LD);
|
||||
SDValue Chain = LD->getChain();
|
||||
SDValue BasePTR = LD->getBasePtr();
|
||||
|
@ -6532,7 +6533,7 @@ SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
|
|||
SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
|
||||
SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals);
|
||||
|
||||
return DAG.getMergeValues({Value, NewChain}, SL);
|
||||
return std::make_pair(Value, NewChain);
|
||||
}
|
||||
|
||||
SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
|
||||
|
@ -6626,10 +6627,7 @@ TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
|
|||
if (!isOperationLegalOrCustom(ISD::LOAD, intVT) &&
|
||||
LoadedVT.isVector()) {
|
||||
// Scalarize the load and let the individual components be handled.
|
||||
SDValue Scalarized = scalarizeVectorLoad(LD, DAG);
|
||||
if (Scalarized->getOpcode() == ISD::MERGE_VALUES)
|
||||
return std::make_pair(Scalarized.getOperand(0), Scalarized.getOperand(1));
|
||||
return std::make_pair(Scalarized.getValue(0), Scalarized.getValue(1));
|
||||
return scalarizeVectorLoad(LD, DAG);
|
||||
}
|
||||
|
||||
// Expand to a (misaligned) integer load of the same size,
|
||||
|
|
|
@ -1396,16 +1396,19 @@ SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
|
|||
SelectionDAG &DAG) const {
|
||||
LoadSDNode *Load = cast<LoadSDNode>(Op);
|
||||
EVT VT = Op.getValueType();
|
||||
SDLoc SL(Op);
|
||||
|
||||
|
||||
// If this is a 2 element vector, we really want to scalarize and not create
|
||||
// weird 1 element vectors.
|
||||
if (VT.getVectorNumElements() == 2)
|
||||
return scalarizeVectorLoad(Load, DAG);
|
||||
if (VT.getVectorNumElements() == 2) {
|
||||
SDValue Ops[2];
|
||||
std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
|
||||
return DAG.getMergeValues(Ops, SL);
|
||||
}
|
||||
|
||||
SDValue BasePtr = Load->getBasePtr();
|
||||
EVT MemVT = Load->getMemoryVT();
|
||||
SDLoc SL(Op);
|
||||
|
||||
const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
|
||||
|
||||
|
@ -2869,11 +2872,13 @@ SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
|
|||
// the bytes again are not eliminated in the case of an unaligned copy.
|
||||
if (!allowsMisalignedMemoryAccesses(
|
||||
VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) {
|
||||
if (VT.isVector())
|
||||
return scalarizeVectorLoad(LN, DAG);
|
||||
|
||||
SDValue Ops[2];
|
||||
std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
|
||||
|
||||
if (VT.isVector())
|
||||
std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LN, DAG);
|
||||
else
|
||||
std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
|
||||
|
||||
return DAG.getMergeValues(Ops, SDLoc(N));
|
||||
}
|
||||
|
||||
|
|
|
@ -1456,7 +1456,9 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
|
|||
if ((LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
|
||||
LoadNode->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
|
||||
VT.isVector()) {
|
||||
return scalarizeVectorLoad(LoadNode, DAG);
|
||||
SDValue Ops[2];
|
||||
std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LoadNode, DAG);
|
||||
return DAG.getMergeValues(Ops, DL);
|
||||
}
|
||||
|
||||
// This is still used for explicit load from addrspace(8)
|
||||
|
|
|
@ -7489,8 +7489,11 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
|
|||
// resource descriptor, we can only make private accesses up to a certain
|
||||
// size.
|
||||
switch (Subtarget->getMaxPrivateElementSize()) {
|
||||
case 4:
|
||||
return scalarizeVectorLoad(Load, DAG);
|
||||
case 4: {
|
||||
SDValue Ops[2];
|
||||
std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
|
||||
return DAG.getMergeValues(Ops, DL);
|
||||
}
|
||||
case 8:
|
||||
if (NumElements > 2)
|
||||
return SplitVectorLoad(Op, DAG);
|
||||
|
|
Loading…
Reference in New Issue