forked from OSchip/llvm-project
Clean up 80 column violations. No functional change.
llvm-svn: 105350
parent 7db953e396
commit 84511e1526
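Every hunk in this change is the same kind of mechanical re-wrap: a declaration, builder chain, comment, or string literal that ran past column 80 is split or tightened without altering behavior. A rough before/after sketch of the pattern, taken from the getMovi32Value hunk below (indentation is approximate, since the diff view strips leading whitespace):

// Before: the second doc-comment line runs past column 80.
/// getMovi32Value - Return binary encoding of operand for movw/movt. If the
/// machine operand requires relocation, record the relocation and return zero.

// After: the sentence is re-wrapped so every line fits within 80 columns.
/// getMovi32Value - Return binary encoding of operand for movw/movt. If the
/// machine operand requires relocation, record the relocation and return
/// zero.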
@@ -199,9 +199,9 @@ ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
 bool
 ARMBaseInstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
-MachineBasicBlock::iterator MI,
-const std::vector<CalleeSavedInfo> &CSI,
-const TargetRegisterInfo *TRI) const {
+MachineBasicBlock::iterator MI,
+const std::vector<CalleeSavedInfo> &CSI,
+const TargetRegisterInfo *TRI) const {
 if (CSI.empty())
 return false;

@@ -347,8 +347,8 @@ unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
 unsigned
 ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
-MachineBasicBlock *FBB,
-const SmallVectorImpl<MachineOperand> &Cond) const {
+MachineBasicBlock *FBB,
+const SmallVectorImpl<MachineOperand> &Cond) const {
 // FIXME this should probably have a DebugLoc argument
 DebugLoc dl;

@@ -288,7 +288,7 @@ public:
 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
 MachineInstr* MI,
-const SmallVectorImpl<unsigned> &Ops,
+const SmallVectorImpl<unsigned> &Ops,
 int FrameIndex) const;

 virtual MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,

@@ -148,8 +148,8 @@ public:
 virtual bool canSimplifyCallFramePseudos(MachineFunction &MF) const;

 virtual void eliminateCallFramePseudoInstr(MachineFunction &MF,
-MachineBasicBlock &MBB,
-MachineBasicBlock::iterator I) const;
+MachineBasicBlock &MBB,
+MachineBasicBlock::iterator I) const;

 virtual unsigned eliminateFrameIndex(MachineBasicBlock::iterator II,
 int SPAdj, FrameIndexValue *Value = NULL,

@@ -147,7 +147,8 @@ namespace {
 }

 /// getMovi32Value - Return binary encoding of operand for movw/movt. If the
-/// machine operand requires relocation, record the relocation and return zero.
+/// machine operand requires relocation, record the relocation and return
+/// zero.
 unsigned getMovi32Value(const MachineInstr &MI,const MachineOperand &MO,
 unsigned Reloc);
 unsigned getMovi32Value(const MachineInstr &MI, unsigned OpIdx,

@@ -418,7 +418,8 @@ void ARMConstantIslands::DoInitialPlacement(MachineFunction &MF,
 static bool BBHasFallthrough(MachineBasicBlock *MBB) {
 // Get the next machine basic block in the function.
 MachineFunction::iterator MBBI = MBB;
-if (llvm::next(MBBI) == MBB->getParent()->end()) // Can't fall off end of function.
+// Can't fall off end of function.
+if (llvm::next(MBBI) == MBB->getParent()->end())
 return false;

 MachineBasicBlock *NextBB = llvm::next(MBBI);

@@ -144,13 +144,15 @@ bool ARMExpandPseudo::ExpandMBB(MachineBasicBlock &MBB) {
 MachineInstrBuilder Even =
 AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
 TII->get(ARM::VMOVQ))
-.addReg(EvenDst, getDefRegState(true) | getDeadRegState(DstIsDead))
-.addReg(EvenSrc, getKillRegState(SrcIsKill)));
+.addReg(EvenDst,
+getDefRegState(true) | getDeadRegState(DstIsDead))
+.addReg(EvenSrc, getKillRegState(SrcIsKill)));
 MachineInstrBuilder Odd =
 AddDefaultPred(BuildMI(MBB, MBBI, MI.getDebugLoc(),
 TII->get(ARM::VMOVQ))
-.addReg(OddDst, getDefRegState(true) | getDeadRegState(DstIsDead))
-.addReg(OddSrc, getKillRegState(SrcIsKill)));
+.addReg(OddDst,
+getDefRegState(true) | getDeadRegState(DstIsDead))
+.addReg(OddSrc, getKillRegState(SrcIsKill)));
 TransferImpOps(MI, Even, Odd);
 MI.eraseFromParent();
 Modified = true;

@@ -788,8 +788,9 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDNode *Op, SDValue N,
 if (N.getOpcode() == ISD::ADD) {
 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
 int RHSC = (int)RHS->getZExtValue();
+// 8 bits.
 if (((RHSC & 0x3) == 0) &&
-((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) { // 8 bits.
+((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) {
 Base = N.getOperand(0);
 OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
 return true;

@@ -798,7 +799,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDNode *Op, SDValue N,
 } else if (N.getOpcode() == ISD::SUB) {
 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
 int RHSC = (int)RHS->getZExtValue();
-if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { // 8 bits.
+// 8 bits.
+if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) {
 Base = N.getOperand(0);
 OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
 return true;

@@ -1548,8 +1550,8 @@ SDNode *ARMDAGToDAGISel::SelectVLDSTLane(SDNode *N, bool IsLoad,
 RegSeq = SDValue(QuadDRegs(MVT::v4i64, V0, V1, V2, V3), 0);
 }
 } else {
-// For 128-bit vectors, take the 64-bit results of the load and insert them
-// as subregs into the result.
+// For 128-bit vectors, take the 64-bit results of the load and insert
+// them as subregs into the result.
 SDValue V[8];
 for (unsigned Vec = 0, i = 0; Vec < NumVecs; ++Vec, i+=2) {
 if (Even) {

@@ -2015,7 +2017,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
 CurDAG->getRegister(0, MVT::i32) };
-return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops,4);
+return CurDAG->getMachineNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32,Ops,4);
 } else {
 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),

@@ -2029,7 +2031,7 @@ SDNode *ARMDAGToDAGISel::Select(SDNode *N) {
 if (Subtarget->isThumb()) {
 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
-return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops,4);
+return CurDAG->getMachineNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32,Ops,4);
 } else {
 SDValue Ops[] = { N->getOperand(0), N->getOperand(1),
 getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),

@@ -1898,8 +1898,8 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
 SDValue Store =
 DAG.getStore(Val.getValue(1), dl, Val, FIN,
-PseudoSourceValue::getFixedStack(AFI->getVarArgsFrameIndex()), 0,
-false, false, 0);
+PseudoSourceValue::getFixedStack(AFI->getVarArgsFrameIndex()),
+0, false, false, 0);
 MemOps.push_back(Store);
 FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), FIN,
 DAG.getConstant(4, getPointerTy()));

@@ -3907,7 +3907,8 @@ static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
 // Narrowing shifts require an immediate right shift.
 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
 break;
-llvm_unreachable("invalid shift count for narrowing vector shift intrinsic");
+llvm_unreachable("invalid shift count for narrowing vector shift "
+"intrinsic");

 default:
 llvm_unreachable("unhandled vector shift");

@@ -189,9 +189,9 @@ namespace llvm {
 bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

 /// isLegalICmpImmediate - Return true if the specified immediate is legal
-/// icmp immediate, that is the target has icmp instructions which can compare
-/// a register against the immediate without having to materialize the
-/// immediate into a register.
+/// icmp immediate, that is the target has icmp instructions which can
+/// compare a register against the immediate without having to materialize
+/// the immediate into a register.
 virtual bool isLegalICmpImmediate(int64_t Imm) const;

 /// getPreIndexedAddressParts - returns true by value, base pointer and

@@ -282,7 +282,8 @@ namespace llvm {
 SDValue &Root, SelectionDAG &DAG,
 DebugLoc dl) const;

-CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return, bool isVarArg) const;
+CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
+bool isVarArg) const;
 SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
 DebugLoc dl, SelectionDAG &DAG,
 const CCValAssign &VA,

@@ -2534,11 +2534,11 @@ let Defs =
 def Int_eh_sjlj_setjmp : XI<(outs), (ins GPR:$src, GPR:$val),
 AddrModeNone, SizeSpecial, IndexModeNone,
 Pseudo, NoItinerary,
-"add\t$val, pc, #8\t${:comment} eh_setjmp begin\n\t"
-"str\t$val, [$src, #+4]\n\t"
-"mov\tr0, #0\n\t"
-"add\tpc, pc, #0\n\t"
-"mov\tr0, #1 ${:comment} eh_setjmp end", "",
+"add\t$val, pc, #8\t${:comment} eh_setjmp begin\n\t"
+"str\t$val, [$src, #+4]\n\t"
+"mov\tr0, #0\n\t"
+"add\tpc, pc, #0\n\t"
+"mov\tr0, #1 ${:comment} eh_setjmp end", "",
 [(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
 Requires<[IsARM, HasVFP2]>;
 }

@@ -2549,11 +2549,11 @@ let Defs =
 def Int_eh_sjlj_setjmp_nofp : XI<(outs), (ins GPR:$src, GPR:$val),
 AddrModeNone, SizeSpecial, IndexModeNone,
 Pseudo, NoItinerary,
-"add\t$val, pc, #8\n ${:comment} eh_setjmp begin\n\t"
-"str\t$val, [$src, #+4]\n\t"
-"mov\tr0, #0\n\t"
-"add\tpc, pc, #0\n\t"
-"mov\tr0, #1 ${:comment} eh_setjmp end", "",
+"add\t$val, pc, #8\n ${:comment} eh_setjmp begin\n\t"
+"str\t$val, [$src, #+4]\n\t"
+"mov\tr0, #0\n\t"
+"add\tpc, pc, #0\n\t"
+"mov\tr0, #1 ${:comment} eh_setjmp end", "",
 [(set R0, (ARMeh_sjlj_setjmp GPR:$src, GPR:$val))]>,
 Requires<[IsARM, NoVFP]>;
 }

@@ -1037,7 +1037,8 @@ def : T1Pat<(i32 imm0_255_comp:$src),
 // scheduling.
 let isReMaterializable = 1 in
 def tLDRpci_pic : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr, pclabel:$cp),
-NoItinerary, "${:comment} ldr.n\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
+NoItinerary,
+"${:comment} ldr.n\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
 [(set GPR:$dst, (ARMpic_add (load (ARMWrapper tconstpool:$addr)),
 imm:$cp))]>,
 Requires<[IsThumb1Only]>;

@@ -2690,7 +2690,8 @@ def : T2Pat<(ARMWrapperJT tjumptable:$dst, imm:$id),
 // scheduling.
 let canFoldAsLoad = 1, isReMaterializable = 1 in
 def t2LDRpci_pic : PseudoInst<(outs GPR:$dst), (ins i32imm:$addr, pclabel:$cp),
-NoItinerary, "${:comment} ldr.w\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
+NoItinerary,
+"${:comment} ldr.w\t$dst, $addr\n$cp:\n\tadd\t$dst, pc",
 [(set GPR:$dst, (ARMpic_add (load (ARMWrapper tconstpool:$addr)),
 imm:$cp))]>,
 Requires<[IsThumb2]>;

@@ -255,25 +255,25 @@ def VCVTSD : VFPAI<(outs SPR:$dst), (ins DPR:$a), VFPUnaryFrm,

 // Between half-precision and single-precision. For disassembly only.

-def VCVTBSH : ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
+def VCVTBSH: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$dst, $a",
 [/* For disassembly only; pattern left blank */]>;

 def : ARMPat<(f32_to_f16 SPR:$a),
 (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;

-def VCVTBHS : ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
+def VCVTBHS: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$dst), (ins SPR:$a),
 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$dst, $a",
 [/* For disassembly only; pattern left blank */]>;

 def : ARMPat<(f16_to_f32 GPR:$a),
 (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;

-def VCVTTSH : ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
+def VCVTTSH: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$dst, $a",
 [/* For disassembly only; pattern left blank */]>;

-def VCVTTHS : ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
+def VCVTTHS: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$dst), (ins SPR:$a),
 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$dst, $a",
 [/* For disassembly only; pattern left blank */]>;

@@ -143,7 +143,8 @@ namespace llvm {
 JumpTableId2AddrMap[JTI] = Addr;
 }

-/// getPCLabelAddr - Retrieve the address of the PC label of the specified id.
+/// getPCLabelAddr - Retrieve the address of the PC label of the
+/// specified id.
 intptr_t getPCLabelAddr(unsigned Id) const {
 DenseMap<unsigned, intptr_t>::const_iterator I = PCLabelMap.find(Id);
 assert(I != PCLabelMap.end());

@@ -968,8 +968,8 @@ bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
 Pred, PredReg, TII, isT2);
 } else {
 if (OddReg == EvenReg && EvenDeadKill) {
-// If the two source operands are the same, the kill marker is probably
-// on the first one. e.g.
+// If the two source operands are the same, the kill marker is
+// probably on the first one. e.g.
 // t2STRDi8 %R5<kill>, %R5, %R9<kill>, 0, 14, %reg0
 EvenDeadKill = false;
 OddDeadKill = true;

@@ -183,7 +183,8 @@ let CompositeIndices = [(qsub_2 qqsub_1, qsub_0), (qsub_3 qqsub_1, qsub_1),
 (ssub_8 qqsub_1, ssub_0), (ssub_9 qqsub_1, ssub_1),
 (ssub_10 qqsub_1, ssub_2), (ssub_11 qqsub_1, ssub_3),
 (ssub_12 qqsub_1, ssub_4), (ssub_13 qqsub_1, ssub_5),
-(ssub_14 qqsub_1, ssub_6), (ssub_15 qqsub_1, ssub_7)] in {
+(ssub_14 qqsub_1, ssub_6), (ssub_15 qqsub_1, ssub_7)] in
+{
 def QQQQ0 : ARMReg<0, "qqqq0", [QQ0, QQ1]>;
 def QQQQ1 : ARMReg<1, "qqqq1", [QQ2, QQ3]>;
 }

@@ -16,7 +16,7 @@
 // Functional Units
 def V6_Pipe : FuncUnit; // pipeline

-// Scheduling information derived from "ARM1176JZF-S Technical Reference Manual".
+// Scheduling information derived from "ARM1176JZF-S Technical Reference Manual"
 //
 def ARMV6Itineraries : ProcessorItineraries<
 [V6_Pipe], [

@@ -51,7 +51,8 @@ public:
 // could not be handled directly in MI.
 int rewriteFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
 unsigned FrameReg, int Offset,
-unsigned MOVOpc, unsigned ADDriOpc, unsigned SUBriOpc) const;
+unsigned MOVOpc, unsigned ADDriOpc,
+unsigned SUBriOpc) const;

 bool saveScavengerRegister(MachineBasicBlock &MBB,
 MachineBasicBlock::iterator I,

@@ -61,7 +61,8 @@ Thumb2InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
 }

 // Handle SPR, DPR, and QPR copies.
-return ARMBaseInstrInfo::copyRegToReg(MBB, I, DestReg, SrcReg, DestRC, SrcRC, DL);
+return ARMBaseInstrInfo::copyRegToReg(MBB, I, DestReg, SrcReg, DestRC,
+SrcRC, DL);
 }

 void Thumb2InstrInfo::