SDAG: Implement Select instead of SelectImpl in AMDGPUDAGToDAGISel

- Where we were returning a node before, call ReplaceNode instead.
- Where we would return null to fall back to another selector, rename
  the method to try* and return a bool for success.
- Where we were calling SelectNodeTo, just return afterwards (see the
  sketch after this list).
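
A minimal before/after sketch of the first and third bullets, assuming the
3.9-era SelectionDAGISel API (SelectNodeTo, getMachineNode, ReplaceNode);
SelectFoo and its S_MOV_B32 rewrite are made up for illustration and do not
appear in this patch:

    // Before: the hook returned the node that replaces N; nullptr meant
    // "fall back to the TableGen'erated matcher".
    SDNode *AMDGPUDAGToDAGISel::SelectFoo(SDNode *N) {
      // SelectNodeTo rewrites N in place and returns it.
      return CurDAG->SelectNodeTo(N, AMDGPU::S_MOV_B32, N->getValueType(0),
                                  N->getOperand(0));
    }

    // After: the hook is void. SelectNodeTo has already replaced N, so the
    // call is simply followed by a bare return; a node built separately with
    // getMachineNode is installed via ReplaceNode(N, NewNode) instead.
    void AMDGPUDAGToDAGISel::SelectFoo(SDNode *N) {
      CurDAG->SelectNodeTo(N, AMDGPU::S_MOV_B32, N->getValueType(0),
                           N->getOperand(0));
    }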

Part of llvm.org/pr26808.

llvm-svn: 269349
Justin Bogner 2016-05-12 21:03:32 +00:00
parent 3ac4d831ee
commit 95927c0fd0
1 changed file with 67 additions and 49 deletions


@@ -61,7 +61,7 @@ public:
   AMDGPUDAGToDAGISel(TargetMachine &TM);
   virtual ~AMDGPUDAGToDAGISel();
   bool runOnMachineFunction(MachineFunction &MF) override;
-  SDNode *SelectImpl(SDNode *N) override;
+  void Select(SDNode *N) override;
   const char *getPassName() const override;
   void PreprocessISelDAG() override;
   void PostprocessISelDAG() override;
@@ -160,14 +160,14 @@ private:
                                  SDValue &Clamp,
                                  SDValue &Omod) const;

-  SDNode *SelectADD_SUB_I64(SDNode *N);
-  SDNode *SelectDIV_SCALE(SDNode *N);
+  void SelectADD_SUB_I64(SDNode *N);
+  void SelectDIV_SCALE(SDNode *N);

   SDNode *getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
                    uint32_t Offset, uint32_t Width);
-  SDNode *SelectS_BFEFromShifts(SDNode *N);
-  SDNode *SelectS_BFE(SDNode *N);
-  SDNode *SelectBRCOND(SDNode *N);
+  void SelectS_BFEFromShifts(SDNode *N);
+  void SelectS_BFE(SDNode *N);
+  void SelectBRCOND(SDNode *N);

   // Include the pieces autogenerated from the target description.
 #include "AMDGPUGenDAGISel.inc"
@@ -329,11 +329,11 @@ static unsigned selectSGPRVectorRegClassID(unsigned NumVectorElts) {
   llvm_unreachable("invalid vector size");
 }

-SDNode *AMDGPUDAGToDAGISel::SelectImpl(SDNode *N) {
+void AMDGPUDAGToDAGISel::Select(SDNode *N) {
   unsigned int Opc = N->getOpcode();
   if (N->isMachineOpcode()) {
     N->setNodeId(-1);
-    return nullptr; // Already selected.
+    return;         // Already selected.
   }

   if (isa<AtomicSDNode>(N) ||
@@ -351,7 +351,8 @@ SDNode *AMDGPUDAGToDAGISel::SelectImpl(SDNode *N) {
         Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
       break;

-    return SelectADD_SUB_I64(N);
+    SelectADD_SUB_I64(N);
+    return;
   }
   case ISD::SCALAR_TO_VECTOR:
   case AMDGPUISD::BUILD_VERTICAL_VECTOR:
@@ -385,8 +386,9 @@ SDNode *AMDGPUDAGToDAGISel::SelectImpl(SDNode *N) {
     SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);

     if (NumVectorElts == 1) {
-      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
-                                  N->getOperand(0), RegClass);
+      CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
+                           RegClass);
+      return;
     }

     assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
@@ -426,8 +428,8 @@ SDNode *AMDGPUDAGToDAGISel::SelectImpl(SDNode *N) {

     if (!IsRegSeq)
       break;
-    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
-                                RegSeqArgs);
+    CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
+    return;
   }
   case ISD::BUILD_PAIR: {
     SDValue RC, SubReg0, SubReg1;
@@ -448,8 +450,9 @@ SDNode *AMDGPUDAGToDAGISel::SelectImpl(SDNode *N) {
     }
     const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                             N->getOperand(1), SubReg1 };
-    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
-                                  DL, N->getValueType(0), Ops);
+    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
+                                          N->getValueType(0), Ops));
+    return;
   }

   case ISD::Constant:
@@ -478,8 +481,9 @@ SDNode *AMDGPUDAGToDAGISel::SelectImpl(SDNode *N) {
       SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)
     };

-    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
-                                  N->getValueType(0), Ops);
+    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
+                                          N->getValueType(0), Ops));
+    return;
   }
   case ISD::LOAD:
   case ISD::STORE: {
@@ -513,11 +517,13 @@ SDNode *AMDGPUDAGToDAGISel::SelectImpl(SDNode *N) {
     uint32_t OffsetVal = Offset->getZExtValue();
     uint32_t WidthVal = Width->getZExtValue();

-    return getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32, SDLoc(N),
-                    N->getOperand(0), OffsetVal, WidthVal);
+    ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
+                            SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
+    return;
   }
   case AMDGPUISD::DIV_SCALE: {
-    return SelectDIV_SCALE(N);
+    SelectDIV_SCALE(N);
+    return;
   }
   case ISD::CopyToReg: {
     const SITargetLowering& Lowering =
@@ -533,12 +539,14 @@ SDNode *AMDGPUDAGToDAGISel::SelectImpl(SDNode *N) {
         Subtarget->getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
       break;

-    return SelectS_BFE(N);
+    SelectS_BFE(N);
+    return;
   case ISD::BRCOND:
-    return SelectBRCOND(N);
+    SelectBRCOND(N);
+    return;
   }

-  return SelectCode(N);
+  SelectCode(N);
 }

 bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) {
@@ -741,7 +749,7 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
   return true;
 }

-SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
+void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
   SDLoc DL(N);
   SDValue LHS = N->getOperand(0);
   SDValue RHS = N->getOperand(1);
@@ -781,12 +789,12 @@ SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
     SDValue(AddHi,0),
     Sub1,
   };
-  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
+  CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
 }

 // We need to handle this here because tablegen doesn't support matching
 // instructions with multiple outputs.
-SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
+void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
   SDLoc SL(N);
   EVT VT = N->getValueType(0);
@@ -802,7 +810,7 @@ SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
   SelectVOP3Mods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
   SelectVOP3Mods(N->getOperand(1), Ops[3], Ops[2]);
   SelectVOP3Mods(N->getOperand(2), Ops[5], Ops[4]);
-  return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
+  CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops);
 }

 bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset,
@@ -1340,7 +1348,7 @@ SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, SDLoc DL, SDValue Val,
   return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
 }

-SDNode *AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
+void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
   // "(a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)
   // "(a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)
   // Predicate: 0 < b <= c < 32
@@ -1357,14 +1365,15 @@ SDNode *AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
       bool Signed = N->getOpcode() == ISD::SRA;
       unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;

-      return getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0),
-                      CVal - BVal, 32 - CVal);
+      ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
+                              32 - CVal));
+      return;
     }
   }
-  return SelectCode(N);
+  SelectCode(N);
 }

-SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
+void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
   switch (N->getOpcode()) {
   case ISD::AND:
     if (N->getOperand(0).getOpcode() == ISD::SRL) {
@@ -1381,8 +1390,9 @@ SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
         if (isMask_32(MaskVal)) {
           uint32_t WidthVal = countPopulation(MaskVal);

-          return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), Srl.getOperand(0),
-                          ShiftVal, WidthVal);
+          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
+                                  Srl.getOperand(0), ShiftVal, WidthVal));
+          return;
         }
       }
     }
@@ -1402,16 +1412,21 @@ SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
         if (isMask_32(MaskVal)) {
           uint32_t WidthVal = countPopulation(MaskVal);

-          return getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N), And.getOperand(0),
-                          ShiftVal, WidthVal);
+          ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
+                                  And.getOperand(0), ShiftVal, WidthVal));
+          return;
         }
       }
-    } else if (N->getOperand(0).getOpcode() == ISD::SHL)
-      return SelectS_BFEFromShifts(N);
+    } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
+      SelectS_BFEFromShifts(N);
+      return;
+    }
     break;
   case ISD::SRA:
-    if (N->getOperand(0).getOpcode() == ISD::SHL)
-      return SelectS_BFEFromShifts(N);
+    if (N->getOperand(0).getOpcode() == ISD::SHL) {
+      SelectS_BFEFromShifts(N);
+      return;
+    }
     break;

   case ISD::SIGN_EXTEND_INREG: {
@@ -1425,20 +1440,22 @@ SDNode *AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
       break;

     unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
-    return getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
-                    Amt->getZExtValue(), Width);
+    ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
+                            Amt->getZExtValue(), Width));
+    return;
   }
   }

-  return SelectCode(N);
+  SelectCode(N);
 }

-SDNode *AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
+void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
   SDValue Cond = N->getOperand(1);

   if (isCBranchSCC(N)) {
     // This brcond will use S_CBRANCH_SCC*, so let tablegen handle it.
-    return SelectCode(N);
+    SelectCode(N);
+    return;
   }

   // The result of VOPC instructions is or'd against ~EXEC before it is
@@ -1457,10 +1474,11 @@ SDNode *AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
                                      SDValue(MaskedCond, 0),
                                      SDValue()); // Passing SDValue() adds a
                                                  // glue output.
-  return CurDAG->SelectNodeTo(N, AMDGPU::S_CBRANCH_VCCNZ, MVT::Other,
-                              N->getOperand(2), // Basic Block
-                              VCC.getValue(0),  // Chain
-                              VCC.getValue(1)); // Glue
+  CurDAG->SelectNodeTo(N, AMDGPU::S_CBRANCH_VCCNZ, MVT::Other,
+                       N->getOperand(2), // Basic Block
+                       VCC.getValue(0),  // Chain
+                       VCC.getValue(1)); // Glue
+  return;
 }

 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,