[TargetLowering][AMDGPU] Make scalarizeVectorLoad return a pair of SDValues instead of creating a MERGE_VALUES node. NFCI

This allows us to clean up some places that were peeking through
the MERGE_VALUES node after the call: since the SDValues are now
returned directly, that peeking is no longer necessary.

Unfortunately, there are several call sites in AMDGPU that wanted
the MERGE_VALUES and now need to create their own.
This commit is contained in:
Craig Topper 2019-12-30 19:07:36 -08:00
parent 831898ff8a
commit 787e078f3e
6 changed files with 29 additions and 28 deletions

View File

@@ -4136,8 +4136,9 @@ public:
/// Turn load of vector type into a load of the individual elements. /// Turn load of vector type into a load of the individual elements.
/// \param LD load to expand /// \param LD load to expand
/// \returns MERGE_VALUEs of the scalar loads with their chains. /// \returns BUILD_VECTOR and TokenFactor nodes.
SDValue scalarizeVectorLoad(LoadSDNode *LD, SelectionDAG &DAG) const; std::pair<SDValue, SDValue> scalarizeVectorLoad(LoadSDNode *LD,
SelectionDAG &DAG) const;
// Turn a store of a vector type into stores of the individual elements. // Turn a store of a vector type into stores of the individual elements.
/// \param ST Store with a vector value type /// \param ST Store with a vector value type

View File

@@ -752,15 +752,7 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains); NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
Value = DAG.getBuildVector(Op.getNode()->getValueType(0), dl, Vals); Value = DAG.getBuildVector(Op.getNode()->getValueType(0), dl, Vals);
} else { } else {
SDValue Scalarized = TLI.scalarizeVectorLoad(LD, DAG); std::tie(Value, NewChain) = TLI.scalarizeVectorLoad(LD, DAG);
// Skip past MERGE_VALUE node if known.
if (Scalarized->getOpcode() == ISD::MERGE_VALUES) {
NewChain = Scalarized.getOperand(1);
Value = Scalarized.getOperand(0);
} else {
NewChain = Scalarized.getValue(1);
Value = Scalarized.getValue(0);
}
} }
AddLegalizedOperand(Op.getValue(0), Value); AddLegalizedOperand(Op.getValue(0), Value);

View File

@@ -6497,8 +6497,9 @@ bool TargetLowering::expandABS(SDNode *N, SDValue &Result,
return true; return true;
} }
SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD, std::pair<SDValue, SDValue>
SelectionDAG &DAG) const { TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
SelectionDAG &DAG) const {
SDLoc SL(LD); SDLoc SL(LD);
SDValue Chain = LD->getChain(); SDValue Chain = LD->getChain();
SDValue BasePTR = LD->getBasePtr(); SDValue BasePTR = LD->getBasePtr();
@@ -6532,7 +6533,7 @@ SDValue TargetLowering::scalarizeVectorLoad(LoadSDNode *LD,
SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains); SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains);
SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals); SDValue Value = DAG.getBuildVector(LD->getValueType(0), SL, Vals);
return DAG.getMergeValues({Value, NewChain}, SL); return std::make_pair(Value, NewChain);
} }
SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST, SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST,
@@ -6626,10 +6627,7 @@ TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const {
if (!isOperationLegalOrCustom(ISD::LOAD, intVT) && if (!isOperationLegalOrCustom(ISD::LOAD, intVT) &&
LoadedVT.isVector()) { LoadedVT.isVector()) {
// Scalarize the load and let the individual components be handled. // Scalarize the load and let the individual components be handled.
SDValue Scalarized = scalarizeVectorLoad(LD, DAG); return scalarizeVectorLoad(LD, DAG);
if (Scalarized->getOpcode() == ISD::MERGE_VALUES)
return std::make_pair(Scalarized.getOperand(0), Scalarized.getOperand(1));
return std::make_pair(Scalarized.getValue(0), Scalarized.getValue(1));
} }
// Expand to a (misaligned) integer load of the same size, // Expand to a (misaligned) integer load of the same size,

View File

@@ -1396,16 +1396,19 @@ SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
SelectionDAG &DAG) const { SelectionDAG &DAG) const {
LoadSDNode *Load = cast<LoadSDNode>(Op); LoadSDNode *Load = cast<LoadSDNode>(Op);
EVT VT = Op.getValueType(); EVT VT = Op.getValueType();
SDLoc SL(Op);
// If this is a 2 element vector, we really want to scalarize and not create // If this is a 2 element vector, we really want to scalarize and not create
// weird 1 element vectors. // weird 1 element vectors.
if (VT.getVectorNumElements() == 2) if (VT.getVectorNumElements() == 2) {
return scalarizeVectorLoad(Load, DAG); SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
return DAG.getMergeValues(Ops, SL);
}
SDValue BasePtr = Load->getBasePtr(); SDValue BasePtr = Load->getBasePtr();
EVT MemVT = Load->getMemoryVT(); EVT MemVT = Load->getMemoryVT();
SDLoc SL(Op);
const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo(); const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();
@@ -2869,11 +2872,13 @@ SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N,
// the bytes again are not eliminated in the case of an unaligned copy. // the bytes again are not eliminated in the case of an unaligned copy.
if (!allowsMisalignedMemoryAccesses( if (!allowsMisalignedMemoryAccesses(
VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) { VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) {
if (VT.isVector())
return scalarizeVectorLoad(LN, DAG);
SDValue Ops[2]; SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
if (VT.isVector())
std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LN, DAG);
else
std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG);
return DAG.getMergeValues(Ops, SDLoc(N)); return DAG.getMergeValues(Ops, SDLoc(N));
} }

View File

@@ -1456,7 +1456,9 @@ SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
if ((LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS || if ((LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
LoadNode->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) && LoadNode->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
VT.isVector()) { VT.isVector()) {
return scalarizeVectorLoad(LoadNode, DAG); SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LoadNode, DAG);
return DAG.getMergeValues(Ops, DL);
} }
// This is still used for explicit load from addrspace(8) // This is still used for explicit load from addrspace(8)

View File

@@ -7489,8 +7489,11 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// resource descriptor, we can only make private accesses up to a certain // resource descriptor, we can only make private accesses up to a certain
// size. // size.
switch (Subtarget->getMaxPrivateElementSize()) { switch (Subtarget->getMaxPrivateElementSize()) {
case 4: case 4: {
return scalarizeVectorLoad(Load, DAG); SDValue Ops[2];
std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG);
return DAG.getMergeValues(Ops, DL);
}
case 8: case 8:
if (NumElements > 2) if (NumElements > 2)
return SplitVectorLoad(Op, DAG); return SplitVectorLoad(Op, DAG);