R600/SI: Add pattern for bitcasting fp immediates to integers

The backend now assumes that all immediates are integers.  This allows
us to simplify immediate handling code, because we no longer need to
handle fp and integer immediates differently.

llvm-svn: 225844
Tom Stellard 2015-01-13 22:59:41 +00:00
parent 703378f156
commit fb77f00be8
8 changed files with 39 additions and 56 deletions
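
A minimal sketch (not part of this commit) of the bitcast idea the patch relies on, using LLVM's APFloat; the helper name and the standalone program are illustrative only:

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include <cstdint>
#include <cstdio>

// Reinterpret a 32-bit float's IEEE-754 bits as an integer. This is the same
// operation the new bitcast_fpimm_to_i32 SDNodeXForm performs during
// instruction selection, so only integer immediates reach the MachineInstr level.
static uint32_t fpBitsAsI32(float F) {
  llvm::APFloat Val(F);
  // bitcastToAPInt() returns the raw bit pattern; no numeric conversion happens.
  return static_cast<uint32_t>(Val.bitcastToAPInt().getZExtValue());
}

int main() {
  printf("1.0f -> 0x%08x\n", fpBitsAsI32(1.0f)); // prints 0x3f800000
  return 0;
}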


@@ -68,18 +68,6 @@ void AMDGPUMCInstLower::lower(const MachineInstr *MI, MCInst &OutMI) const {
switch (MO.getType()) {
default:
llvm_unreachable("unknown operand type");
-case MachineOperand::MO_FPImmediate: {
-const APFloat &FloatValue = MO.getFPImm()->getValueAPF();
-if (&FloatValue.getSemantics() == &APFloat::IEEEsingle)
-MCOp = MCOperand::CreateFPImm(FloatValue.convertToFloat());
-else if (&FloatValue.getSemantics() == &APFloat::IEEEdouble)
-MCOp = MCOperand::CreateFPImm(FloatValue.convertToDouble());
-else
-llvm_unreachable("Unhandled floating point type");
-break;
-}
case MachineOperand::MO_Immediate:
MCOp = MCOperand::CreateImm(MO.getImm());
break;


@@ -173,7 +173,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
continue;
MachineOperand &OpToFold = MI.getOperand(1);
-bool FoldingImm = OpToFold.isImm() || OpToFold.isFPImm();
+bool FoldingImm = OpToFold.isImm();
// FIXME: We could also be folding things like FrameIndexes and
// TargetIndexes.
@@ -210,12 +210,7 @@ bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
if (FoldingImm) {
const TargetRegisterClass *UseRC = MRI.getRegClass(UseOp.getReg());
-if (OpToFold.isFPImm()) {
-Imm = OpToFold.getFPImm()->getValueAPF().bitcastToAPInt();
-} else {
-Imm = APInt(64, OpToFold.getImm());
-}
+Imm = APInt(64, OpToFold.getImm());
// Split 64-bit constants into 32-bits for folding.
if (UseOp.getSubReg()) {


@@ -1077,7 +1077,7 @@ SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
const APFloat K1Val(BitsToFloat(0x2f800000));
const SDValue K1 = DAG.getConstantFP(K1Val, MVT::f32);
-const SDValue One = DAG.getTargetConstantFP(1.0, MVT::f32);
+const SDValue One = DAG.getConstantFP(1.0, MVT::f32);
EVT SetCCVT = getSetCCResultType(*DAG.getContext(), MVT::f32);
@@ -1549,7 +1549,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
if (LHS.getOpcode() == ISD::FADD) {
SDValue A = LHS.getOperand(0);
if (A == LHS.getOperand(1)) {
-const SDValue Two = DAG.getTargetConstantFP(2.0, MVT::f32);
+const SDValue Two = DAG.getConstantFP(2.0, MVT::f32);
return DAG.getNode(AMDGPUISD::MAD, DL, VT, Two, A, RHS);
}
}
@@ -1558,7 +1558,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
if (RHS.getOpcode() == ISD::FADD) {
SDValue A = RHS.getOperand(0);
if (A == RHS.getOperand(1)) {
-const SDValue Two = DAG.getTargetConstantFP(2.0, MVT::f32);
+const SDValue Two = DAG.getConstantFP(2.0, MVT::f32);
return DAG.getNode(AMDGPUISD::MAD, DL, VT, Two, A, LHS);
}
}
@@ -1602,7 +1602,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
SDValue A = LHS.getOperand(0);
if (A == LHS.getOperand(1)) {
-const SDValue Two = DAG.getTargetConstantFP(2.0, MVT::f32);
+const SDValue Two = DAG.getConstantFP(2.0, MVT::f32);
SDValue NegRHS = DAG.getNode(ISD::FNEG, DL, VT, RHS);
return DAG.getNode(AMDGPUISD::MAD, DL, VT, Two, A, NegRHS);
@@ -1614,7 +1614,7 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
SDValue A = RHS.getOperand(0);
if (A == RHS.getOperand(1)) {
-const SDValue NegTwo = DAG.getTargetConstantFP(-2.0, MVT::f32);
+const SDValue NegTwo = DAG.getConstantFP(-2.0, MVT::f32);
return DAG.getNode(AMDGPUISD::MAD, DL, VT, NegTwo, A, LHS);
}
}


@@ -736,8 +736,8 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
}
if (!Src1.isReg()) {
-// Allow commuting instructions with Imm or FPImm operands.
-if (NewMI || (!Src1.isImm() && !Src1.isFPImm()) ||
+// Allow commuting instructions with Imm operands.
+if (NewMI || !Src1.isImm() ||
(!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
return nullptr;
}
@@ -765,8 +765,6 @@ MachineInstr *SIInstrInfo::commuteInstruction(MachineInstr *MI,
unsigned SubReg = Src0.getSubReg();
if (Src1.isImm())
Src0.ChangeToImmediate(Src1.getImm());
-else if (Src1.isFPImm())
-Src0.ChangeToFPImmediate(Src1.getFPImm());
else
llvm_unreachable("Should only have immediates");
@@ -981,16 +979,11 @@ bool SIInstrInfo::isInlineConstant(const MachineOperand &MO) const {
if (MO.isImm())
return isInlineConstant(APInt(32, MO.getImm(), true));
-if (MO.isFPImm()) {
-APFloat FpImm = MO.getFPImm()->getValueAPF();
-return isInlineConstant(FpImm.bitcastToAPInt());
-}
return false;
}
bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO) const {
-return (MO.isImm() || MO.isFPImm()) && !isInlineConstant(MO);
+return MO.isImm() && !isInlineConstant(MO);
}
static bool compareMachineOp(const MachineOperand &Op0,
@@ -1003,8 +996,6 @@ static bool compareMachineOp(const MachineOperand &Op0,
return Op0.getReg() == Op1.getReg();
case MachineOperand::MO_Immediate:
return Op0.getImm() == Op1.getImm();
-case MachineOperand::MO_FPImmediate:
-return Op0.getFPImm() == Op1.getFPImm();
default:
llvm_unreachable("Didn't expect to be comparing these operand types");
}
@@ -1014,7 +1005,7 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
const MachineOperand &MO) const {
const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];
-assert(MO.isImm() || MO.isFPImm() || MO.isTargetIndex() || MO.isFI());
+assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
return true;
@@ -1121,9 +1112,15 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
// Make sure the register classes are correct
for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
+if (MI->getOperand(i).isFPImm()) {
+ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
+"all fp values to integers.";
+return false;
+}
switch (Desc.OpInfo[i].OperandType) {
case MCOI::OPERAND_REGISTER: {
-if ((MI->getOperand(i).isImm() || MI->getOperand(i).isFPImm()) &&
+if (MI->getOperand(i).isImm() &&
!isImmOperandLegal(MI, i, MI->getOperand(i))) {
ErrInfo = "Illegal immediate value for operand.";
return false;
@@ -1134,8 +1131,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
// Check if this operand is an immediate.
// FrameIndex operands will be replaced by immediates, so they are
// allowed.
-if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFPImm() &&
-!MI->getOperand(i).isFI()) {
+if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFI()) {
ErrInfo = "Expected immediate, but got non-immediate";
return false;
}
@@ -1195,7 +1191,7 @@ bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
// Verify SRC1 for VOP2 and VOPC
if (Src1Idx != -1 && (isVOP2(Opcode) || isVOPC(Opcode))) {
const MachineOperand &Src1 = MI->getOperand(Src1Idx);
-if (Src1.isImm() || Src1.isFPImm()) {
+if (Src1.isImm()) {
ErrInfo = "VOP[2C] src1 cannot be an immediate.";
return false;
}
@@ -1479,7 +1475,7 @@ bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
// Handle non-register types that are treated like immediates.
-assert(MO->isImm() || MO->isFPImm() || MO->isTargetIndex() || MO->isFI());
+assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());
if (!DefinedRC) {
// This operand expects an immediate.


@@ -159,6 +159,18 @@ def as_i64imm: SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getSExtValue(), MVT::i64);
}]>;
+// Copied from the AArch64 backend:
+def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
+return CurDAG->getTargetConstant(
+N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i32);
+}]>;
+// Copied from the AArch64 backend:
+def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
+return CurDAG->getTargetConstant(
+N->getValueAPF().bitcastToAPInt().getZExtValue(), MVT::i64);
+}]>;
def IMM8bit : PatLeaf <(imm),
[{return isUInt<8>(N->getZExtValue());}]
>;


@@ -2552,7 +2552,7 @@ def : Pat <
def : Pat <
(SGPRImm<(f32 fpimm)>:$imm),
-(S_MOV_B32 fpimm:$imm)
+(S_MOV_B32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;
def : Pat <
@@ -2562,7 +2562,7 @@ def : Pat <
def : Pat <
(f32 fpimm:$imm),
-(V_MOV_B32_e32 fpimm:$imm)
+(V_MOV_B32_e32 (f32 (bitcast_fpimm_to_i32 $imm)))
>;
def : Pat <
@@ -2580,7 +2580,7 @@ def : Pat <
def : Pat <
(f64 InlineFPImm<f64>:$imm),
-(S_MOV_B64 InlineFPImm<f64>:$imm)
+(S_MOV_B64 (f64 (bitcast_fpimm_to_i64 InlineFPImm<f64>:$imm)))
>;
/********** ===================== **********/
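
As a hedged illustration of what the rewritten patterns produce (not output from this commit's tests): a non-inline f32 constant such as 1.5f, whose IEEE-754 bit pattern is 0x3fc00000, is now materialized by V_MOV_B32_e32 carrying that 32-bit value as an ordinary integer literal instead of an FPImm operand.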


@@ -308,10 +308,9 @@ void SILowerControlFlowPass::Kill(MachineInstr &MI) {
#endif
// Clear this thread from the exec mask if the operand is negative
-if ((Op.isImm() || Op.isFPImm())) {
+if ((Op.isImm())) {
// Constant operand: Set exec mask to 0 or do nothing
-if (Op.isImm() ? (Op.getImm() & 0x80000000) :
-Op.getFPImm()->isNegative()) {
+if (Op.getImm() & 0x80000000) {
BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_MOV_B64), AMDGPU::EXEC)
.addImm(0);
}


@@ -130,7 +130,7 @@ static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
// Only one literal constant is allowed per instruction, so if src0 is a
// literal constant then we can't do any folding.
-if ((Src0->isImm() || Src0->isFPImm()) && TII->isLiteralConstant(*Src0))
+if (Src0->isImm() && TII->isLiteralConstant(*Src0))
return;
@@ -151,12 +151,6 @@ static void foldImmediates(MachineInstr &MI, const SIInstrInfo *TII,
if (MovSrc.isImm() && isUInt<32>(MovSrc.getImm())) {
Src0->ChangeToImmediate(MovSrc.getImm());
ConstantFolded = true;
-} else if (MovSrc.isFPImm()) {
-const ConstantFP *CFP = MovSrc.getFPImm();
-if (&CFP->getValueAPF().getSemantics() == &APFloat::IEEEsingle) {
-Src0->ChangeToFPImmediate(CFP);
-ConstantFolded = true;
-}
}
if (ConstantFolded) {
if (MRI.use_empty(Reg))
@@ -193,7 +187,6 @@ bool SIShrinkInstructions::runOnMachineFunction(MachineFunction &MF) {
if (MI.getOpcode() == AMDGPU::S_MOV_B32) {
const MachineOperand &Src = MI.getOperand(1);
-// TODO: Handle FPImm?
if (Src.isImm()) {
if (isInt<16>(Src.getImm()) && !TII->isInlineConstant(Src))
MI.setDesc(TII->get(AMDGPU::S_MOVK_I32));