AMDGPU: Consistently check for immediates in SIInstrInfo::FoldImmediate

Summary:
The PeepholeOptimizer pass calls this function based solely on
DefMI->isMoveImmediate(), which only checks the MoveImm bit in the
instruction description. So it is up to FoldImmediate itself to verify
that DefMI *actually* moves from an immediate.
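
To illustrate the hazard, here is a condensed sketch of the caller side
(simplified from PeepholeOptimizer's immediate-folding logic, not the
verbatim call site):

    // Condensed sketch, not the exact PeepholeOptimizer code: the guard only
    // consults the MoveImm flag in the MCInstrDesc. An S_MOV_B32 whose src0
    // is a frame index rather than an immediate still passes it, so
    // FoldImmediate must re-check the operand kind itself.
    if (DefMI.isMoveImmediate() && TII->FoldImmediate(UseMI, DefMI, Reg, MRI))
      Changed = true;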

I don't have a separate test case for this, but the next patch introduces
a test case that happens to crash the compiler without this change.

This error is caught by the assertion in MachineOperand::getImm().
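
For reference, the assertion in question is essentially this (paraphrased
from llvm/include/llvm/CodeGen/MachineOperand.h; exact wording may differ
between revisions):

    int64_t getImm() const {
      assert(isImm() && "Wrong MachineOperand accessor");
      return Contents.ImmVal;
    }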

Change-Id: I88e7cdbcf54d75e1a296822e6fe5f9a5f095bbf8

Reviewers: arsenm, rampitec

Subscribers: kzhuravl, wdng, yaxunl, dstuttard, tpr, t-tye, llvm-commits

Differential Revision: https://reviews.llvm.org/D40342

llvm-svn: 319155
commit 39980dac0b
parent a46522f369
Author: Nicolai Haehnle
Date:   2017-11-28 08:41:50 +00:00

1 changed file with 22 additions and 23 deletions


@@ -1918,28 +1918,29 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
   if (!MRI->hasOneNonDBGUse(Reg))
     return false;
 
+  switch (DefMI.getOpcode()) {
+  default:
+    return false;
+  case AMDGPU::S_MOV_B64:
+    // TODO: We could fold 64-bit immediates, but this gets complicated
+    // when there are sub-registers.
+    return false;
+
+  case AMDGPU::V_MOV_B32_e32:
+  case AMDGPU::S_MOV_B32:
+    break;
+  }
+
+  const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
+  assert(ImmOp);
+  // FIXME: We could handle FrameIndex values here.
+  if (!ImmOp->isImm())
+    return false;
+
   unsigned Opc = UseMI.getOpcode();
   if (Opc == AMDGPU::COPY) {
     bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
-    switch (DefMI.getOpcode()) {
-    default:
-      return false;
-    case AMDGPU::S_MOV_B64:
-      // TODO: We could fold 64-bit immediates, but this gets complicated
-      // when there are sub-registers.
-      return false;
-
-    case AMDGPU::V_MOV_B32_e32:
-    case AMDGPU::S_MOV_B32:
-      break;
-    }
     unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
-    const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
-    assert(ImmOp);
-    // FIXME: We could handle FrameIndex values here.
-    if (!ImmOp->isImm()) {
-      return false;
-    }
     UseMI.setDesc(get(NewOpc));
     UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
     UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
@@ -1953,15 +1954,13 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
     if (hasAnyModifiersSet(UseMI))
       return false;
 
-    const MachineOperand &ImmOp = DefMI.getOperand(1);
-
     // If this is a free constant, there's no reason to do this.
     // TODO: We could fold this here instead of letting SIFoldOperands do it
    // later.
     MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
 
     // Any src operand can be used for the legality check.
-    if (isInlineConstant(UseMI, *Src0, ImmOp))
+    if (isInlineConstant(UseMI, *Src0, *ImmOp))
       return false;
 
     bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;
@@ -1979,7 +1978,7 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
 
       // We need to swap operands 0 and 1 since madmk constant is at operand 1.
 
-      const int64_t Imm = DefMI.getOperand(1).getImm();
+      const int64_t Imm = ImmOp->getImm();
 
       // FIXME: This would be a lot easier if we could return a new instruction
       // instead of having to modify in place.
@@ -2024,7 +2023,7 @@ bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
     if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
       return false;
 
-    const int64_t Imm = DefMI.getOperand(1).getImm();
+    const int64_t Imm = ImmOp->getImm();
 
     // FIXME: This would be a lot easier if we could return a new instruction
     // instead of having to modify in place.
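
To make the failure mode concrete, here is a hypothetical sketch of the
pre-patch hazard in the V_MAD/V_MAC paths (the frame-index scenario is
inferred from the FIXME in the diff; this is an illustration, not code
from the file):

    // S_MOV_B32 carries the MoveImm flag, but its src0 may be a frame index,
    // e.g. when materializing a stack address. The pre-patch code called
    // DefMI.getOperand(1).getImm() unconditionally on such paths and tripped
    // the assertion; the hoisted isImm() check now bails out first.
    const MachineOperand &Src0 = DefMI.getOperand(1);
    if (!Src0.isImm())
      return false;                      // e.g. a frame index: fold nothing
    const int64_t Imm = Src0.getImm();   // safe only after the check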