AMDGPU: Avoid explicitly listing all the memory nodes

Matt Arsenault 2020-08-06 19:59:25 -04:00
parent 648228bcc3
commit 3c0597a9e4
1 changed file with 16 additions and 31 deletions

llvm/lib/Target/AMDGPU/SIISelLowering.cpp

@@ -8564,6 +8564,8 @@ SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
 SDValue SITargetLowering::performMemSDNodeCombine(MemSDNode *N,
                                                   DAGCombinerInfo &DCI) const {
+  // FIXME: getBasePtr does not work correctly for intrinsic nodes and will find
+  // the intrinsic ID, not the pointer.
   SDValue Ptr = N->getBasePtr();
   SelectionDAG &DAG = DCI.DAG;
   SDLoc SL(N);
 
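The FIXME added in this hunk points at a real pitfall: for target memory intrinsic nodes (ISD::INTRINSIC_W_CHAIN / ISD::INTRINSIC_VOID), operand 0 is the chain and operand 1 is the intrinsic ID, so MemSDNode::getBasePtr, which falls back to getOperand(1) for opcodes it does not special-case, returns the intrinsic ID constant rather than a pointer. Below is a minimal sketch of the kind of guard a caller could add, assuming LLVM's SelectionDAG headers; hasUsableBasePtr is a hypothetical helper for illustration, not part of this patch.

#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Hypothetical helper (illustration only, not in the patch): reject nodes
// whose "base pointer" operand slot actually holds the intrinsic ID.
static bool hasUsableBasePtr(const MemSDNode *N) {
  switch (N->getOpcode()) {
  case ISD::INTRINSIC_W_CHAIN: // Operands: chain, intrinsic ID, args...
  case ISD::INTRINSIC_VOID:
    return false; // getBasePtr() == getOperand(1) == the intrinsic ID.
  default:
    return true;  // Loads, stores, and atomics carry a real pointer here.
  }
}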
@@ -10477,8 +10479,6 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
   if (getTargetMachine().getOptLevel() == CodeGenOpt::None)
     return SDValue();
   switch (N->getOpcode()) {
-  default:
-    return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
   case ISD::ADD:
     return performAddCombine(N, DCI);
   case ISD::SUB:
@@ -10505,35 +10505,6 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
     return performMinMaxCombine(N, DCI);
   case ISD::FMA:
     return performFMACombine(N, DCI);
-  case ISD::LOAD: {
-    if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI))
-      return Widended;
-    LLVM_FALLTHROUGH;
-  }
-  case ISD::STORE:
-  case ISD::ATOMIC_LOAD:
-  case ISD::ATOMIC_STORE:
-  case ISD::ATOMIC_CMP_SWAP:
-  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
-  case ISD::ATOMIC_SWAP:
-  case ISD::ATOMIC_LOAD_ADD:
-  case ISD::ATOMIC_LOAD_SUB:
-  case ISD::ATOMIC_LOAD_AND:
-  case ISD::ATOMIC_LOAD_OR:
-  case ISD::ATOMIC_LOAD_XOR:
-  case ISD::ATOMIC_LOAD_NAND:
-  case ISD::ATOMIC_LOAD_MIN:
-  case ISD::ATOMIC_LOAD_MAX:
-  case ISD::ATOMIC_LOAD_UMIN:
-  case ISD::ATOMIC_LOAD_UMAX:
-  case ISD::ATOMIC_LOAD_FADD:
-  case AMDGPUISD::ATOMIC_INC:
-  case AMDGPUISD::ATOMIC_DEC:
-  case AMDGPUISD::ATOMIC_LOAD_FMIN:
-  case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
-    if (DCI.isBeforeLegalize())
-      break;
-    return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
   case ISD::AND:
     return performAndCombine(N, DCI);
   case ISD::OR:
@@ -10598,7 +10569,21 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
     return performExtractVectorEltCombine(N, DCI);
   case ISD::INSERT_VECTOR_ELT:
     return performInsertVectorEltCombine(N, DCI);
+  case ISD::LOAD: {
+    if (SDValue Widended = widenLoad(cast<LoadSDNode>(N), DCI))
+      return Widended;
+    LLVM_FALLTHROUGH;
+  }
+  default: {
+    if (!DCI.isBeforeLegalize()) {
+      if (MemSDNode *MemNode = dyn_cast<MemSDNode>(N))
+        return performMemSDNodeCombine(MemNode, DCI);
+    }
+
+    break;
+  }
   }
   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
 }
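The net effect: the switch no longer enumerates every memory opcode. After legalization, anything that is a MemSDNode, including memory opcodes added later, reaches performMemSDNodeCombine through the default case via dyn_cast, while ISD::LOAD keeps its widening attempt and falls through into the same path. Below is a self-contained toy model of the refactor in plain C++, with dynamic_cast standing in for LLVM's dyn_cast; all names are illustrative, not LLVM's.

#include <iostream>
#include <memory>
#include <vector>

enum class Opcode { Add, Load, Store, AtomicSwap, AtomicLoadFAdd };

struct Node {
  Opcode Op;
  explicit Node(Opcode Op) : Op(Op) {}
  virtual ~Node() = default;
};

// Analogue of MemSDNode: the common base of every memory-touching node.
struct MemNode : Node {
  using Node::Node;
};

const char *combine(const Node &N) {
  switch (N.Op) {
  case Opcode::Add:
    return "performAddCombine";
  default:
    // One dynamic-type test replaces listing Load, Store, AtomicSwap, ...
    if (dynamic_cast<const MemNode *>(&N))
      return "performMemNodeCombine";
    return "genericCombine";
  }
}

int main() {
  std::vector<std::unique_ptr<Node>> Nodes;
  Nodes.push_back(std::make_unique<Node>(Opcode::Add));
  Nodes.push_back(std::make_unique<MemNode>(Opcode::Load));
  Nodes.push_back(std::make_unique<MemNode>(Opcode::AtomicLoadFAdd)); // no new case needed
  for (const auto &N : Nodes)
    std::cout << combine(*N) << '\n';
}

The design point mirrors the commit: a type test in the default case subsumes the open-ended opcode list, so the dispatch cannot silently fall out of date when a new memory opcode is introduced.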