[ARM] CodeGen: Remove AddDefaultCC. NFC.
Replace all uses of AddDefaultCC with add(condCodeOp()). The transformation has
been done automatically with a custom tool based on Clang AST Matchers +
RefactoringTool.

Differential Revision: https://reviews.llvm.org/D28557

llvm-svn: 291893
parent 5f1f6eceb3
commit 8a73f5562f
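The change is mechanical at every call site: instead of wrapping the finished MachineInstrBuilder in AddDefaultCC(...), the optional CPSR (condition-code) def operand is appended through the builder chain itself via the new condCodeOp() helper. A minimal sketch of the pattern, using illustrative opcode, register, and variable names rather than any particular call site from this diff:

    // Before: AddDefaultCC wraps the builder expression and appends the
    // optional CC-out operand (no register by default).
    AddDefaultCC(BuildMI(MBB, I, DL, TII->get(ARM::ADDri), DestReg)
                     .addReg(SrcReg)
                     .addImm(Imm)
                     .add(predOps(ARMCC::AL)));

    // After: condCodeOp() returns the same operand as a plain MachineOperand,
    // so it composes with the other .add(...) calls in a single chain.
    BuildMI(MBB, I, DL, TII->get(ARM::ADDri), DestReg)
        .addReg(SrcReg)
        .addImm(Imm)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

condCodeOp() takes an optional register argument (default 0, i.e. no register), matching the old AddDefaultCC behavior of MIB.addReg(0), and returning a MachineOperand addresses the FIXME in ARMBaseInstrInfo.h that asked for exactly that.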
@@ -734,9 +734,10 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
   bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

   if (GPRDest && GPRSrc) {
-    AddDefaultCC(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
-                     .addReg(SrcReg, getKillRegState(KillSrc))
-                     .add(predOps(ARMCC::AL)));
+    BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
+        .addReg(SrcReg, getKillRegState(KillSrc))
+        .add(predOps(ARMCC::AL))
+        .add(condCodeOp());
     return;
   }

@@ -850,7 +851,7 @@ void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
     Mov = Mov.add(predOps(ARMCC::AL));
     // MOVr can set CC.
     if (Opc == ARM::MOVr)
-      Mov = AddDefaultCC(Mov);
+      Mov = Mov.add(condCodeOp());
   }
   // Add implicit super-register defs and kills to the last instruction.
   Mov->addRegisterDefined(DestReg, TRI);

@@ -1966,7 +1967,7 @@ ARMBaseInstrInfo::optimizeSelect(MachineInstr &MI,

   // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
   if (NewMI->hasOptionalDef())
-    AddDefaultCC(NewMI);
+    NewMI.add(condCodeOp());

   // The output register value when the predicate is false is an implicit
   // register operand tied to the first def.

@@ -2832,11 +2833,12 @@ bool ARMBaseInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
     unsigned Reg1 = UseMI.getOperand(OpIdx).getReg();
     bool isKill = UseMI.getOperand(OpIdx).isKill();
     unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
-    AddDefaultCC(BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
-                         get(NewUseOpc), NewReg)
-                     .addReg(Reg1, getKillRegState(isKill))
-                     .addImm(SOImmValV1)
-                     .add(predOps(ARMCC::AL)));
+    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), get(NewUseOpc),
+            NewReg)
+        .addReg(Reg1, getKillRegState(isKill))
+        .addImm(SOImmValV1)
+        .add(predOps(ARMCC::AL))
+        .add(condCodeOp());
     UseMI.setDesc(get(NewUseOpc));
     UseMI.getOperand(1).setReg(NewReg);
     UseMI.getOperand(1).setIsKill();

@@ -410,13 +410,13 @@ static inline std::array<MachineOperand, 2> predOps(ARMCC::CondCodes Pred,
           MachineOperand::CreateReg(PredReg, 0)}};
 }

-// FIXME: Replace with something that returns a MachineOperand
-static inline
-const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
-  return MIB.addReg(0);
+/// Get the operand corresponding to the conditional code result. By default,
+/// this is 0 (no register).
+static inline MachineOperand condCodeOp(unsigned CCReg = 0) {
+  return MachineOperand::CreateReg(CCReg, 0);
 }

-// FIXME: Replace with something that returns a MachineOperand
+// FIXME: Replace with something that returns a MachineOperand directly.
 static inline
 const MachineInstrBuilder &AddDefaultT1CC(const MachineInstrBuilder &MIB,
                                           bool isDead = false) {

@@ -609,7 +609,7 @@ materializeFrameBaseRegister(MachineBasicBlock *MBB,
       .addFrameIndex(FrameIdx).addImm(Offset);

   if (!AFI->isThumb1OnlyFunction())
-    AddDefaultCC(MIB.add(predOps(ARMCC::AL)));
+    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
 }

 void ARMBaseRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,

@@ -1189,11 +1189,11 @@ bool ARMExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
                "bits set.");
         unsigned bicOpc = AFI->isThumbFunction() ?
           ARM::t2BICri : ARM::BICri;
-        AddDefaultCC(
-            BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(bicOpc), ARM::R6)
-                .addReg(ARM::R6, RegState::Kill)
-                .addImm(MaxAlign - 1)
-                .add(predOps(ARMCC::AL)));
+        BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(bicOpc), ARM::R6)
+            .addReg(ARM::R6, RegState::Kill)
+            .addImm(MaxAlign - 1)
+            .add(predOps(ARMCC::AL))
+            .add(condCodeOp());
       }
     }

@@ -265,7 +265,7 @@ ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
     if (CPSR)
       AddDefaultT1CC(MIB);
     else
-      AddDefaultCC(MIB);
+      MIB.add(condCodeOp());
   }
   return MIB;
 }

@@ -2691,7 +2691,7 @@ unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
               .addImm(ImmEnc)
               .add(predOps(ARMCC::AL));
     if (hasS)
-      AddDefaultCC(MIB);
+      MIB.add(condCodeOp());
     // Second instruction consumes the first's result.
     SrcReg = ResultReg;
   }

@@ -257,23 +257,26 @@ static void emitAligningInstructions(MachineFunction &MF, ARMFunctionInfo *AFI,
           .addImm(~AlignMask)
           .add(predOps(ARMCC::AL));
     } else if (AlignMask <= 255) {
-      AddDefaultCC(BuildMI(MBB, MBBI, DL, TII.get(ARM::BICri), Reg)
-                       .addReg(Reg, RegState::Kill)
-                       .addImm(AlignMask)
-                       .add(predOps(ARMCC::AL)));
+      BuildMI(MBB, MBBI, DL, TII.get(ARM::BICri), Reg)
+          .addReg(Reg, RegState::Kill)
+          .addImm(AlignMask)
+          .add(predOps(ARMCC::AL))
+          .add(condCodeOp());
     } else {
       assert(!MustBeSingleInstruction &&
              "Shouldn't call emitAligningInstructions demanding a single "
             "instruction to be emitted for large stack alignment for a target "
              "without BFC.");
-      AddDefaultCC(BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg)
-                       .addReg(Reg, RegState::Kill)
-                       .addImm(ARM_AM::getSORegOpc(ARM_AM::lsr, NrBitsToZero))
-                       .add(predOps(ARMCC::AL)));
-      AddDefaultCC(BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg)
-                       .addReg(Reg, RegState::Kill)
-                       .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, NrBitsToZero))
-                       .add(predOps(ARMCC::AL)));
+      BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg)
+          .addReg(Reg, RegState::Kill)
+          .addImm(ARM_AM::getSORegOpc(ARM_AM::lsr, NrBitsToZero))
+          .add(predOps(ARMCC::AL))
+          .add(condCodeOp());
+      BuildMI(MBB, MBBI, DL, TII.get(ARM::MOVsi), Reg)
+          .addReg(Reg, RegState::Kill)
+          .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, NrBitsToZero))
+          .add(predOps(ARMCC::AL))
+          .add(condCodeOp());
     }
   } else {
     // Since this is only reached for Thumb-2 targets, the BFC instruction

@@ -484,11 +487,12 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF,
       break;
     }

-    AddDefaultCC(BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), ARM::SP)
-                     .addReg(ARM::SP, RegState::Kill)
-                     .addReg(ARM::R4, RegState::Kill)
-                     .setMIFlags(MachineInstr::FrameSetup)
-                     .add(predOps(ARMCC::AL)));
+    BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), ARM::SP)
+        .addReg(ARM::SP, RegState::Kill)
+        .addReg(ARM::R4, RegState::Kill)
+        .setMIFlags(MachineInstr::FrameSetup)
+        .add(predOps(ARMCC::AL))
+        .add(condCodeOp());
     NumBytes = 0;
   }

@@ -1120,10 +1124,11 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
   // sub r4, sp, #numregs * 8
   // The immediate is <= 64, so it doesn't need any special encoding.
   unsigned Opc = isThumb ? ARM::t2SUBri : ARM::SUBri;
-  AddDefaultCC(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
-                   .addReg(ARM::SP)
-                   .addImm(8 * NumAlignedDPRCS2Regs)
-                   .add(predOps(ARMCC::AL)));
+  BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
+      .addReg(ARM::SP)
+      .addImm(8 * NumAlignedDPRCS2Regs)
+      .add(predOps(ARMCC::AL))
+      .add(condCodeOp());

   unsigned MaxAlign = MF.getFrameInfo().getMaxAlignment();
   // We must set parameter MustBeSingleInstruction to true, since

@@ -1142,7 +1147,7 @@ static void emitAlignedDPRCS2Spills(MachineBasicBlock &MBB,
                                 .addReg(ARM::R4)
                                 .add(predOps(ARMCC::AL));
   if (!isThumb)
-    AddDefaultCC(MIB);
+    MIB.add(condCodeOp());

   // Now spill NumAlignedDPRCS2Regs registers starting from d8.
   // r4 holds the stack slot address.

@@ -1270,10 +1275,11 @@ static void emitAlignedDPRCS2Restores(MachineBasicBlock &MBB,
   assert(!AFI->isThumb1OnlyFunction() && "Can't realign stack for thumb1");

   unsigned Opc = isThumb ? ARM::t2ADDri : ARM::ADDri;
-  AddDefaultCC(BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
-                   .addFrameIndex(D8SpillFI)
-                   .addImm(0)
-                   .add(predOps(ARMCC::AL)));
+  BuildMI(MBB, MI, DL, TII.get(Opc), ARM::R4)
+      .addFrameIndex(D8SpillFI)
+      .addImm(0)
+      .add(predOps(ARMCC::AL))
+      .add(condCodeOp());

   // Now restore NumAlignedDPRCS2Regs registers starting from d8.
   unsigned NextReg = ARM::D8;

@@ -2149,7 +2155,8 @@ void ARMFrameLowering::adjustForSegmentedStacks(

   // sub SR1, sp, #StackSize
   if (!CompareStackPointer && Thumb) {
-    AddDefaultCC(BuildMI(McrMBB, DL, TII.get(ARM::tSUBi8), ScratchReg1))
+    BuildMI(McrMBB, DL, TII.get(ARM::tSUBi8), ScratchReg1)
+        .add(condCodeOp())
         .addReg(ScratchReg1)
         .addImm(AlignedStackSize)
         .add(predOps(ARMCC::AL));

@@ -2223,7 +2230,8 @@ void ARMFrameLowering::adjustForSegmentedStacks(
   // Pass first argument for the __morestack by Scratch Register #0.
   //   The amount size of stack required
   if (Thumb) {
-    AddDefaultCC(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg0))
+    BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg0)
+        .add(condCodeOp())
         .addImm(AlignedStackSize)
         .add(predOps(ARMCC::AL));
   } else {

@@ -2235,7 +2243,8 @@ void ARMFrameLowering::adjustForSegmentedStacks(
   // Pass second argument for the __morestack by Scratch Register #1.
   //   The amount size of stack consumed to save function arguments.
   if (Thumb) {
-    AddDefaultCC(BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg1))
+    BuildMI(AllocMBB, DL, TII.get(ARM::tMOVi8), ScratchReg1)
+        .add(condCodeOp())
         .addImm(alignToARMConstant(ARMFI->getArgumentStackSize()))
         .add(predOps(ARMCC::AL));
   } else {

@@ -7854,10 +7854,11 @@ void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
         .add(predOps(ARMCC::AL));
     // Set the low bit because of thumb mode.
     unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
-    AddDefaultCC(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
-                     .addReg(NewVReg1, RegState::Kill)
-                     .addImm(0x01)
-                     .add(predOps(ARMCC::AL)));
+    BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
+        .addReg(NewVReg1, RegState::Kill)
+        .addImm(0x01)
+        .add(predOps(ARMCC::AL))
+        .add(condCodeOp());
     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
     BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
         .addReg(NewVReg2, RegState::Kill)

@@ -8084,11 +8085,12 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
         .add(predOps(ARMCC::AL));

     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
-    AddDefaultCC(BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
-                     .addReg(NewVReg3, RegState::Kill)
-                     .addReg(NewVReg1)
-                     .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
-                     .add(predOps(ARMCC::AL)));
+    BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
+        .addReg(NewVReg3, RegState::Kill)
+        .addReg(NewVReg1)
+        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
+        .add(predOps(ARMCC::AL))
+        .add(condCodeOp());

     BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
         .addReg(NewVReg4, RegState::Kill)

@@ -8237,10 +8239,11 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
         .addReg(ARM::CPSR);

     unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
-    AddDefaultCC(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
-                     .addReg(NewVReg1)
-                     .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
-                     .add(predOps(ARMCC::AL)));
+    BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
+        .addReg(NewVReg1)
+        .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
+        .add(predOps(ARMCC::AL))
+        .add(condCodeOp());
     unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
     BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
         .addJumpTableIndex(MJTI)

@@ -8676,7 +8679,10 @@ ARMTargetLowering::EmitStructByval(MachineInstr &MI,
     MachineInstrBuilder MIB =
         BuildMI(*BB, BB->end(), dl,
                 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
-    AddDefaultCC(MIB.addReg(varPhi).addImm(UnitSize).add(predOps(ARMCC::AL)));
+    MIB.addReg(varPhi)
+        .addImm(UnitSize)
+        .add(predOps(ARMCC::AL))
+        .add(condCodeOp());
     MIB->getOperand(5).setReg(ARM::CPSR);
     MIB->getOperand(5).setIsDef(true);
   }

@@ -8770,11 +8776,12 @@ ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
     }
   }

-  AddDefaultCC(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
-                   .addReg(ARM::SP, RegState::Kill)
-                   .addReg(ARM::R4, RegState::Kill)
-                   .setMIFlags(MachineInstr::FrameSetup)
-                   .add(predOps(ARMCC::AL)));
+  BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
+      .addReg(ARM::SP, RegState::Kill)
+      .addReg(ARM::R4, RegState::Kill)
+      .setMIFlags(MachineInstr::FrameSetup)
+      .add(predOps(ARMCC::AL))
+      .add(condCodeOp());

   MI.eraseFromParent();
   return MBB;

@@ -89,13 +89,13 @@ bool ARMInstructionSelector::select(MachineInstr &I) const {
   switch (I.getOpcode()) {
   case G_ADD:
     I.setDesc(TII.get(ARM::ADDrr));
-    AddDefaultCC(MIB.add(predOps(ARMCC::AL)));
+    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
     break;
   case G_FRAME_INDEX:
     // Add 0 to the given frame index and hope it will eventually be folded into
     // the user(s).
     I.setDesc(TII.get(ARM::ADDri));
-    AddDefaultCC(MIB.addImm(0).add(predOps(ARMCC::AL)));
+    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
     break;
   case G_LOAD:
     I.setDesc(TII.get(ARM::LDRi12));

@@ -350,7 +350,7 @@ void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                 .add(predOps(ARMCC::AL))
                 .setMIFlags(MIFlags);
     if (HasCCOut)
-      AddDefaultCC(MIB);
+      MIB.add(condCodeOp());

     BaseReg = DestReg;
   }