forked from OSchip/llvm-project
parent 87c7b09d8d
commit f54f60f3ce
@@ -136,12 +136,12 @@ void MipsInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
printRegName(O, Op.getReg());
return;
}

if (Op.isImm()) {
O << Op.getImm();
return;
}

assert(Op.isExpr() && "unknown operand kind in printOperand");
printExpr(Op.getExpr(), O);
}

@@ -18,7 +18,7 @@
namespace llvm {
// These enumeration declarations were orignally in MipsInstrInfo.h but
// had to be moved here to avoid circular dependencies between
// LLVMMipsCodeGen and LLVMMipsAsmPrinter.
namespace Mips {
// Mips Branch Codes
enum FPBranchCode {

@@ -78,16 +78,16 @@ class TargetMachine;
class MipsInstPrinter : public MCInstPrinter {
public:
MipsInstPrinter(const MCAsmInfo &MAI) : MCInstPrinter(MAI) {}

// Autogenerated by tblgen.
void printInstruction(const MCInst *MI, raw_ostream &O);
static const char *getInstructionName(unsigned Opcode);
static const char *getRegisterName(unsigned RegNo);

virtual StringRef getOpcodeName(unsigned Opcode) const;
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);

private:
void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
void printUnsignedImm(const MCInst *MI, int opNum, raw_ostream &O);

@@ -178,7 +178,7 @@ public:
/// \parm Res [output] - On return, the relaxed instruction.
void relaxInstruction(const MCInst &Inst, MCInst &Res) const {
}

/// @}

/// WriteNopData - Write an (optimal) nop sequence of Count bytes

@@ -191,7 +191,7 @@ inline static unsigned getMipsRegisterNumbering(unsigned RegEnum)
case Mips::HWR29:
return 29;
case Mips::FP: case Mips::FP_64: case Mips::F30: case Mips::D30_64:
case Mips::D15:
return 30;
case Mips::RA: case Mips::RA_64: case Mips::F31: case Mips::D31_64:
return 31;

@@ -1,4 +1,4 @@
-//===- Mips.td - Describe the Mips Target Machine ----------*- tablegen -*-===//
+//===-- Mips.td - Describe the Mips Target Machine ---------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//

@@ -108,7 +108,7 @@ let Predicates = [HasMips64r2] in {
}

/// Load and Store Instructions
/// aligned
defm LB64 : LoadM64<0x20, "lb", sextloadi8>;
defm LBu64 : LoadM64<0x24, "lbu", zextloadi8>;
defm LH64 : LoadM64<0x21, "lh", sextloadi16_a>;

@@ -267,7 +267,7 @@ def : Pat<(MipsDynAlloc addr:$f), (DynAlloc64 addr:$f)>, Requires<[IsN64]>;
// truncate
def : Pat<(i32 (trunc CPU64Regs:$src)),
(SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>, Requires<[IsN64]>;

// 32-to-64-bit extension
def : Pat<(i64 (anyext CPURegs:$src)), (SLL64_32 CPURegs:$src)>;
def : Pat<(i64 (zext CPURegs:$src)), (DSRL (DSLL64_32 CPURegs:$src), 32)>;

@@ -77,7 +77,7 @@ void MipsAnalyzeImmediate::GetInstSeqLs(int64_t Imm, unsigned RemSize,
}

// Replace a ADDiu & SLL pair with a LUi.
// e.g. the following two instructions
// ADDiu 0x0111
// SLL 18
// are replaced with

@@ -149,5 +149,5 @@ const MipsAnalyzeImmediate::InstSeq
// Set Insts to the shortest instruction sequence.
GetShortestSeq(SeqLs, Insts);

return Insts;
}

@@ -51,7 +51,7 @@ namespace llvm {
void ReplaceADDiuSLLWithLUi(InstSeq &Seq);

/// GetShortestSeq - Find the shortest instruction sequence in SeqLs and
/// return it in Insts.
void GetShortestSeq(InstSeqLs &SeqLs, InstSeq &Insts);

unsigned Size;

@@ -56,7 +56,7 @@ static bool isUnalignedLoadStore(unsigned Opc) {
Opc == Mips::ULHu64 || Opc == Mips::USD || Opc == Mips::USW64 ||
Opc == Mips::USH64 ||
Opc == Mips::ULD_P8 || Opc == Mips::ULW64_P8 ||
Opc == Mips::ULH64_P8 || Opc == Mips::ULHu64_P8 ||
Opc == Mips::USD_P8 || Opc == Mips::USW64_P8 ||
Opc == Mips::USH64_P8;
}

@@ -109,7 +109,7 @@ void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) {
MCInstLowering.LowerCPLOAD(MI, MCInsts);
else if (Opc == Mips::CPRESTORE)
MCInstLowering.LowerCPRESTORE(MI, MCInsts);

if (!MCInsts.empty()) {
for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin();
I != MCInsts.end(); ++I)

@@ -244,7 +244,7 @@ void MipsAsmPrinter::emitFrameDirective() {
unsigned returnReg = RI.getRARegister();
unsigned stackSize = MF->getFrameInfo()->getStackSize();

if (OutStreamer.hasRawTextSupport())
OutStreamer.EmitRawText("\t.frame\t$" +
StringRef(MipsInstPrinter::getRegisterName(stackReg)).lower() +
"," + Twine(stackSize) + ",$" +

@@ -263,7 +263,7 @@ const char *MipsAsmPrinter::getCurrentABIString() const {
}

void MipsAsmPrinter::EmitFunctionEntryLabel() {
if (OutStreamer.hasRawTextSupport())
OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName()));
OutStreamer.EmitLabel(CurrentFnSym);
}

@@ -316,18 +316,18 @@ bool MipsAsmPrinter::isBlockOnlyReachableByFallthrough(const MachineBasicBlock*
// If there isn't exactly one predecessor, it can't be a fall through.
MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), PI2 = PI;
++PI2;

if (PI2 != MBB->pred_end())
return false;

// The predecessor has to be immediately before this block.
if (!Pred->isLayoutSuccessor(MBB))
return false;

// If the block is completely empty, then it definitely does fall through.
if (Pred->empty())
return true;

// Otherwise, check the last instruction.
// Check if the last terminator is an unconditional branch.
MachineBasicBlock::const_iterator I = Pred->end();

@@ -354,7 +354,7 @@ bool MipsAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
raw_ostream &O) {
if (ExtraCode && ExtraCode[0])
return true; // Unknown modifier.

const MachineOperand &MO = MI->getOperand(OpNum);
assert(MO.isReg() && "unexpected inline asm memory operand");
O << "0($" << MipsInstPrinter::getRegisterName(MO.getReg()) << ")";

@@ -1,4 +1,4 @@
-//===- MipsCallingConv.td - Calling Conventions for Mips ---*- tablegen -*-===//
+//===-- MipsCallingConv.td - Calling Conventions for Mips --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//

@@ -37,7 +37,7 @@ def RetCC_MipsO32 : CallingConv<[
def CC_MipsN : CallingConv<[
// Handles byval parameters.
CCIfByVal<CCCustom<"CC_Mips64Byval">>,

// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,

@@ -74,7 +74,7 @@ def CC_MipsN : CallingConv<[
def CC_MipsN_VarArg : CallingConv<[
// Handles byval parameters.
CCIfByVal<CCCustom<"CC_Mips64Byval">>,

// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,

@@ -1,4 +1,4 @@
-//===- MipsCondMov.td - Describe Mips Conditional Moves ---*- tablegen -*--===//
+//===-- MipsCondMov.td - Describe Mips Conditional Moves --*- tablegen -*--===//
//
// The LLVM Compiler Infrastructure
//

@@ -105,7 +105,7 @@ runOnMachineBasicBlock(MachineBasicBlock &MBB) {
if (EnableDelaySlotFiller && findDelayInstr(MBB, I, D)) {
MBB.splice(llvm::next(I), &MBB, D);
++UsefulSlots;
} else
BuildMI(MBB, llvm::next(I), I->getDebugLoc(), TII->get(Mips::NOP));

// Record the filler instruction that filled the delay slot.

@@ -173,7 +173,7 @@ bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
return true;

// Loads or stores cannot be moved past a store to the delay slot
// and stores cannot be moved past a load.
if (candidate->mayLoad()) {
if (sawStore)
return true;

@@ -220,8 +220,8 @@ void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
MCInstrDesc MCID = MI->getDesc();
unsigned e = MI->isCall() || MI->isReturn() ? MCID.getNumOperands() :
MI->getNumOperands();

// Add RA to RegDefs to prevent users of RA from going into delay slot.
if (MI->isCall())
RegDefs.insert(Mips::RA);

@@ -63,7 +63,7 @@ bool Inserter::runOnMachineFunction(MachineFunction &F) {
if (MBB.isLandingPad()) {
// Find EH_LABEL first.
for (; I->getOpcode() != TargetOpcode::EH_LABEL; ++I) ;

// Insert lw.
++I;
DebugLoc dl = I != MBB.end() ? I->getDebugLoc() : DebugLoc();

@@ -84,7 +84,7 @@ bool Inserter::runOnMachineFunction(MachineFunction &F) {
.addImm(0);
Changed = true;
}
}
}

return Changed;
}

@@ -64,7 +64,7 @@ bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) {
const MCInstrDesc& MCid = I->getDesc();

switch(MCid.getOpcode()) {
default:
++I;
continue;
case Mips::SETGP2:

@@ -79,7 +79,7 @@ bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) {
case Mips::ExtractElementF64:
ExpandExtractElementF64(MBB, I);
break;
}

// delete original instr
MBB.erase(I++);

@@ -90,7 +90,7 @@ bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) {
}

void MipsExpandPseudo::ExpandBuildPairF64(MachineBasicBlock& MBB,
MachineBasicBlock::iterator I) {
unsigned DstReg = I->getOperand(0).getReg();
unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
const MCInstrDesc& Mtc1Tdd = TII->get(Mips::MTC1);

@@ -116,7 +116,7 @@ void MipsExpandPseudo::ExpandExtractElementF64(MachineBasicBlock& MBB,
BuildMI(MBB, I, dl, Mfc1Tdd, DstReg).addReg(*(SubReg + N));
}

/// createMipsMipsExpandPseudoPass - Returns a pass that expands pseudo
/// instrs into real instrs
FunctionPass *llvm::createMipsExpandPseudoPass(MipsTargetMachine &tm) {
return new MipsExpandPseudo(tm);

@@ -101,7 +101,7 @@ static void expandLargeImm(unsigned Reg, int64_t Imm, bool IsN64,
MachineBasicBlock::iterator II, DebugLoc DL) {
unsigned LUi = IsN64 ? Mips::LUi64 : Mips::LUi;
unsigned ADDu = IsN64 ? Mips::DADDu : Mips::ADDu;
unsigned ZEROReg = IsN64 ? Mips::ZERO_64 : Mips::ZERO;
unsigned ATReg = IsN64 ? Mips::AT_64 : Mips::AT;
MipsAnalyzeImmediate AnalyzeImm;
const MipsAnalyzeImmediate::InstSeq &Seq =

@@ -125,7 +125,7 @@ static void expandLargeImm(unsigned Reg, int64_t Imm, bool IsN64,
for (++Inst; Inst != Seq.end(); ++Inst)
BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg)
.addImm(SignExtend64<16>(Inst->ImmOpnd));

BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(Reg).addReg(ATReg);
BuildMI(MBB, II, DL, TII.get(Mips::ATMACRO));
}

@@ -150,15 +150,15 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
// First, compute final stack size.
unsigned RegSize = STI.isGP32bit() ? 4 : 8;
unsigned StackAlign = getStackAlignment();
unsigned LocalVarAreaOffset = MipsFI->needGPSaveRestore() ?
(MFI->getObjectOffset(MipsFI->getGPFI()) + RegSize) :
MipsFI->getMaxCallFrameSize();
uint64_t StackSize = RoundUpToAlignment(LocalVarAreaOffset, StackAlign) +
RoundUpToAlignment(MFI->getStackSize(), StackAlign);

// Update stack size
MFI->setStackSize(StackSize);

BuildMI(MBB, MBBI, dl, TII.get(Mips::NOREORDER));
BuildMI(MBB, MBBI, dl, TII.get(Mips::NOMACRO));

@@ -201,13 +201,13 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
// register to the stack.
for (unsigned i = 0; i < CSI.size(); ++i)
++MBBI;

// Iterate over list of callee-saved registers and emit .cfi_offset
// directives.
MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel);

for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(),
E = CSI.end(); I != E; ++I) {
int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());

@@ -235,14 +235,14 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const {
Moves.push_back(MachineMove(CSLabel, DstML, SrcML));
}
}
}
}

// if framepointer enabled, set it to point to the stack pointer.
if (hasFP(MF)) {
// Insert instruction "move $fp, $sp" at this location.
BuildMI(MBB, MBBI, dl, TII.get(ADDu), FP).addReg(SP).addReg(ZERO);

// emit ".cfi_def_cfa_register $fp"
MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol();
BuildMI(MBB, MBBI, dl,
TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel);

@@ -280,7 +280,7 @@ void MipsFrameLowering::emitEpilogue(MachineFunction &MF,
if (hasFP(MF)) {
// Find the first instruction that restores a callee-saved register.
MachineBasicBlock::iterator I = MBBI;

for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i)
--I;

@@ -314,7 +314,7 @@ processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
if (hasFP(MF))
MRI.setPhysRegUsed(FP);

// The register allocator might determine $ra is used after seeing
// instruction "jr $ra", but we do not want PrologEpilogInserter to insert
// instructions to save/restore $ra unless there is a function call.
// To correct this, $ra is explicitly marked unused if there is no

@@ -1,4 +1,4 @@
-//==--- MipsFrameLowering.h - Define frame lowering for Mips --*- C++ -*---===//
+//===-- MipsFrameLowering.h - Define frame lowering for Mips ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//

@@ -114,7 +114,7 @@ private:
// passes from moving them.
void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) {
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();

if (!MipsFI->globalBaseRegSet())
return;

@@ -132,8 +132,8 @@ void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) {
else {
const TargetRegisterClass *RC;
RC = Subtarget.isABI_N64() ?
Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass;

V0 = RegInfo.createVirtualRegister(RC);
V1 = RegInfo.createVirtualRegister(RC);
}

@@ -178,12 +178,12 @@ void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) {
BuildMI(MBB, I, DL, TII.get(Mips::SETGP2), GlobalBaseReg)
.addReg(Mips::T9);
}
}
}
}

bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
bool Ret = SelectionDAGISel::runOnMachineFunction(MF);

InitGlobalBaseReg(MF);

return Ret;

@@ -251,7 +251,7 @@ SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) {
// lwc1 $f0, %lo($CPI1_0)($2)
if (Addr.getOperand(1).getOpcode() == MipsISD::Lo) {
SDValue LoVal = Addr.getOperand(1);
if (isa<ConstantPoolSDNode>(LoVal.getOperand(0)) ||
isa<GlobalAddressSDNode>(LoVal.getOperand(0))) {
Base = Addr.getOperand(0);
Offset = LoVal.getOperand(0);

@@ -273,7 +273,7 @@ SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) {

/// Select multiply instructions.
std::pair<SDNode*, SDNode*>
MipsDAGToDAGISel::SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, EVT Ty,
bool HasLo, bool HasHi) {
SDNode *Lo = 0, *Hi = 0;
SDNode *Mul = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N->getOperand(0),

@@ -288,7 +288,7 @@ MipsDAGToDAGISel::SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, EVT Ty,
if (HasHi)
Hi = CurDAG->getMachineNode(Ty == MVT::i32 ? Mips::MFHI : Mips::MFHI64, dl,
Ty, InFlag);

return std::make_pair(Lo, Hi);
}

@@ -420,7 +420,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {

const MipsAnalyzeImmediate::InstSeq &Seq =
AnalyzeImm.Analyze(Imm, Size, false);

MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
DebugLoc DL = CN->getDebugLoc();
SDNode *RegOpnd;

@@ -462,7 +462,7 @@ SDNode* MipsDAGToDAGISel::Select(SDNode *Node) {
SrcReg = Mips::HWR29_64;
DestReg = Mips::V1_64;
}

SDNode *Rdhwr =
CurDAG->getMachineNode(RdhwrOpc, Node->getDebugLoc(),
Node->getValueType(0),

@@ -1,4 +1,4 @@
-//=== --MipsISelLowering.cpp - Mips DAG Lowering Implementation -----------===//
+//===-- MipsISelLowering.cpp - Mips DAG Lowering Implementation -----------===//
//
// The LLVM Compiler Infrastructure
//

@@ -36,9 +36,9 @@
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

// If I is a shifted mask, set the size (Size) and the first bit of the
// mask (Pos), and return true.
// For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
static bool IsShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
if (!isShiftedMask_64(I))
return false;

@@ -215,12 +215,12 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom);
setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand);
setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);

setInsertFencesForAtomic(true);

@@ -261,7 +261,7 @@ MipsTargetLowering(MipsTargetMachine &TM)

bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;

switch (SVT) {
case MVT::i64:
case MVT::i32:

@@ -458,8 +458,8 @@ static SDValue PerformDivRemCombine(SDNode *N, SelectionDAG& DAG,
return SDValue();

EVT Ty = N->getValueType(0);
unsigned LO = (Ty == MVT::i32) ? Mips::LO : Mips::LO64;
unsigned HI = (Ty == MVT::i32) ? Mips::HI : Mips::HI64;
unsigned opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem :
MipsISD::DivRemU;
DebugLoc dl = N->getDebugLoc();

@@ -597,7 +597,7 @@ static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG,
ConstantSDNode *CN;
if (!(CN = dyn_cast<ConstantSDNode>(ShiftRight.getOperand(1))))
return SDValue();

uint64_t Pos = CN->getZExtValue();
uint64_t SMPos, SMSize;

@@ -616,13 +616,13 @@ static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG,
ShiftRight.getOperand(0), DAG.getConstant(Pos, MVT::i32),
DAG.getConstant(SMSize, MVT::i32));
}

static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG,
TargetLowering::DAGCombinerInfo &DCI,
const MipsSubtarget* Subtarget) {
// Pattern match INS.
// $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
// where mask1 = (2**size - 1) << pos, mask0 = ~mask1
// => ins $dst, $src, size, pos, $src1
if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())
return SDValue();

@@ -642,7 +642,7 @@ static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG,
// See if Op's second operand matches (and (shl $src, pos), mask1).
if (And1.getOpcode() != ISD::AND)
return SDValue();

if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
!IsShiftedMask(CN->getZExtValue(), SMPos1, SMSize1))
return SDValue();

@@ -661,16 +661,16 @@ static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG,
unsigned Shamt = CN->getZExtValue();

// Return if the shift amount and the first bit position of mask are not the
// same.
EVT ValTy = N->getValueType(0);
if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
return SDValue();

return DAG.getNode(MipsISD::Ins, N->getDebugLoc(), ValTy, Shl.getOperand(0),
DAG.getConstant(SMPos0, MVT::i32),
DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0));
}

SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
const {
SelectionDAG &DAG = DCI.DAG;

@@ -1143,7 +1143,7 @@ MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
// and newval, incr2, mask
BuildMI(BB, dl, TII->get(Mips::AND), NewVal).addReg(Incr2).addReg(Mask);
}

BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal0)
.addReg(OldVal).addReg(Mask2);
BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)

@@ -1480,7 +1480,7 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();

if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) {
SDVTList VTs = DAG.getVTList(MVT::i32);

@@ -1583,7 +1583,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
Entry.Node = Argument;
Entry.Ty = PtrTy;
Args.push_back(Entry);

std::pair<SDValue, SDValue> CallResult =
LowerCallTo(DAG.getEntryNode(), PtrTy,
false, false, false, false, 0, CallingConv::C,

@@ -1720,7 +1720,7 @@ SDValue MipsTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1),
MachinePointerInfo(SV), false, false, 0);
}

// Called if the size of integer registers is large enough to hold the whole
// floating point number.
static SDValue LowerFCOPYSIGNLargeIntReg(SDValue Op, SelectionDAG &DAG) {

@@ -1779,7 +1779,7 @@ MipsTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {

if (Ty == MVT::f32 || HasMips64)
return LowerFCOPYSIGNLargeIntReg(Op, DAG);

return LowerFCOPYSIGNSmallIntReg(Op, DAG, Subtarget->isLittle());
}

@@ -1943,7 +1943,7 @@ static bool CC_Mips64Byval(unsigned ValNo, MVT ValVT, MVT LocVT,

assert(Align <= 16 && "Cannot handle alignments larger than 16.");

// If byval is 16-byte aligned, the first arg register must be even.
if ((Align == 16) && (FirstIdx % 2)) {
State.AllocateReg(Mips64IntRegs[FirstIdx], Mips64DPRegs[FirstIdx]);
++FirstIdx;

@@ -1955,10 +1955,10 @@ static bool CC_Mips64Byval(unsigned ValNo, MVT ValVT, MVT LocVT,

// Allocate space on caller's stack.
unsigned Offset = State.AllocateStack(Size, Align);

if (FirstIdx < 8)
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Mips64IntRegs[FirstIdx],
LocVT, LocInfo));
else
State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

@@ -1980,7 +1980,7 @@ AnalyzeMips64CallOperands(CCState &CCInfo,
R = CC_MipsN(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
else
R = CC_MipsN_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);

if (R) {
#ifndef NDEBUG
dbgs() << "Call operand #" << i << " has unhandled type "

@@ -2065,7 +2065,7 @@ WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
// Read second subword if necessary.
if (RemainingSize != 0) {
assert(RemainingSize == 1 && "There must be one byte remaining.");
LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
DAG.getConstant(Offset, MVT::i32));
unsigned Alignment = std::min(ByValAlign, (unsigned )2);
SDValue Subword = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,

@@ -2131,7 +2131,7 @@ PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
RegsToPass.push_back(std::make_pair(*Reg, LoadVal));
}

// Return if the struct has been fully copied.
if (!(MemCpySize = ByValSize - Offset))
return;

@@ -2146,10 +2146,10 @@ PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl,

if (RemSize < LoadSize)
continue;

SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
DAG.getConstant(Offset, PtrTy));
SDValue LoadVal =
DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i64, Chain, LoadPtr,
MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
false, false, Alignment);

@@ -2160,13 +2160,13 @@ PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
unsigned Shamt = isLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8);
SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i64, LoadVal,
DAG.getConstant(Shamt, MVT::i32));

Val = Val.getNode() ? DAG.getNode(ISD::OR, dl, MVT::i64, Val, Shift) :
Shift;
Offset += LoadSize;
Alignment = std::min(Alignment, LoadSize);
}

RegsToPass.push_back(std::make_pair(*Reg, Val));
return;
}

@@ -2286,11 +2286,11 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
Subtarget->isLittle());
else
PassByValArg64(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI,
MFI, DAG, Arg, VA, Flags, getPointerTy(),
Subtarget->isLittle());
continue;
}

// Promote the value if needed.
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");

@@ -2306,7 +2306,7 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
Arg, DAG.getConstant(1, MVT::i32));
if (!Subtarget->isLittle())
std::swap(Lo, Hi);
unsigned LocRegLo = VA.getLocReg();
unsigned LocRegHigh = getNextIntArgReg(LocRegLo);
RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));

@@ -2421,7 +2421,7 @@ MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
}
}

// T9 should contain the address of the callee function if
// -reloction-model=pic or it is an indirect call.
if (IsPICCall || !GlobalOrExternal) {
// copy to T9

@@ -2562,7 +2562,7 @@ CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl,
false, 0);
OutChains.push_back(Store);
}

return LastFI;
}

@@ -2931,6 +2931,6 @@ bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
unsigned MipsTargetLowering::getJumpTableEncoding() const {
if (IsN64)
return MachineJumpTableInfo::EK_GPRel64BlockAddress;

return TargetLowering::getJumpTableEncoding();
}

@@ -109,7 +109,7 @@ namespace llvm {
private:
// Subtarget Info
const MipsSubtarget *Subtarget;

bool HasMips64, IsN64, IsO32;

// Lower Operand helpers

@@ -1,4 +1,4 @@
-//===- MipsInstrFPU.td - Mips FPU Instruction Information --*- tablegen -*-===//
+//===-- MipsInstrFPU.td - Mips FPU Instruction Information -*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//

@@ -267,12 +267,12 @@ let Predicates = [HasMips32r2Or64] in {
}

let Predicates = [HasMips32r2, NotMips64] in {
def LDXC1 : FPIdxLoad<0x1, "ldxc1", AFGR64, CPURegs, load_a>;
def SDXC1 : FPIdxStore<0x9, "sdxc1", AFGR64, CPURegs, store_a>;
}

let Predicates = [HasMips64, NotN64] in {
def LDXC164 : FPIdxLoad<0x1, "ldxc1", FGR64, CPURegs, load_a>;
def SDXC164 : FPIdxStore<0x9, "sdxc1", FGR64, CPURegs, store_a>;
}

@@ -280,7 +280,7 @@ let Predicates = [HasMips64, NotN64] in {
let Predicates = [IsN64] in {
def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load_a>;
def LUXC1_P8 : FPIdxLoad<0x5, "luxc1", FGR32, CPU64Regs, load_u>;
def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load_a>;
def SWXC1_P8 : FPIdxStore<0x8, "swxc1", FGR32, CPU64Regs, store_a>;
def SUXC1_P8 : FPIdxStore<0xd, "suxc1", FGR32, CPU64Regs, store_u>;
def SDXC164_P8 : FPIdxStore<0x9, "sdxc1", FGR64, CPU64Regs, store_a>;

@@ -1,4 +1,4 @@
-//===- MipsInstrFormats.td - Mips Instruction Formats ------*- tablegen -*-===//
+//===-- MipsInstrFormats.td - Mips Instruction Formats -----*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//

@@ -32,7 +32,7 @@ MipsInstrInfo::MipsInstrInfo(MipsTargetMachine &tm)
RI(*TM.getSubtargetImpl(), *this),
UncondBrOpc(TM.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J) {}

const MipsRegisterInfo &MipsInstrInfo::getRegisterInfo() const {
return RI;
}

@@ -157,7 +157,7 @@ copyPhysReg(MachineBasicBlock &MBB,
assert(Opc && "Cannot copy registers");

MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));

if (DestReg)
MIB.addReg(DestReg, RegState::Define);

@@ -173,7 +173,7 @@ static MachineMemOperand* GetMemOperand(MachineBasicBlock &MBB, int FI,
MachineFunction &MF = *MBB.getParent();
MachineFrameInfo &MFI = *MF.getFrameInfo();
unsigned Align = MFI.getObjectAlignment(FI);

return MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), Flag,
MFI.getObjectSize(FI), Align);
}

@@ -283,7 +283,7 @@ static void AnalyzeCondBr(const MachineInstr* Inst, unsigned Opc,
SmallVectorImpl<MachineOperand>& Cond) {
assert(GetAnalyzableBrOpc(Opc) && "Not an analyzable branch");
int NumOp = Inst->getNumExplicitOperands();

// for both int and fp branches, the last explicit operand is the
// MBB.
BB = Inst->getOperand(NumOp-1).getMBB();

@@ -371,8 +371,8 @@ bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
FBB = LastInst->getOperand(0).getMBB();

return false;
}

}

void MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB,
MachineBasicBlock *TBB, DebugLoc DL,
const SmallVectorImpl<MachineOperand>& Cond)

@@ -1,4 +1,4 @@
-//===- MipsInstrInfo.h - Mips Instruction Information -----------*- C++ -*-===//
+//===-- MipsInstrInfo.h - Mips Instruction Information ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//

@@ -103,7 +103,7 @@ def MipsDivRemU : SDNode<"MipsISD::DivRemU", SDT_MipsDivRem,
// target constant nodes that would otherwise remain unchanged with ADDiu
// nodes. Without these wrapper node patterns, the following conditional move
// instrucion is emitted when function cmov2 in test/CodeGen/Mips/cmov.ll is
// compiled:
// movn %got(d)($gp), %got(c)($gp), $4
// This instruction is illegal since movn can take only register operands.

@@ -407,7 +407,7 @@ multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode,
Requires<[NotN64]>;
def _P8 : LoadM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
Requires<[IsN64]>;
}

// 64-bit load.
multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode,

@@ -416,7 +416,7 @@ multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode,
Requires<[NotN64]>;
def _P8 : LoadM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
Requires<[IsN64]>;
}

// 32-bit load.
multiclass LoadUnAlign32<bits<6> op> {

@@ -497,7 +497,7 @@ class JumpFJ<bits<6> op, string instr_asm>:
let isTerminator=1;
let isBarrier=1;
let hasDelaySlot = 1;
let Predicates = [RelocStatic];
}

// Unconditional branch

@@ -510,7 +510,7 @@ class UncondBranch<bits<6> op, string instr_asm>:
let isTerminator = 1;
let isBarrier = 1;
let hasDelaySlot = 1;
let Predicates = [RelocPIC];
}

let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1,

@@ -647,7 +647,7 @@ class ReadHardware<RegisterClass CPURegClass, RegisterClass HWRegClass>

// Ext and Ins
class ExtBase<bits<6> _funct, string instr_asm, RegisterClass RC>:
FR<0x1f, _funct, (outs RC:$rt), (ins RC:$rs, uimm16:$pos, size_ext:$sz),
!strconcat(instr_asm, " $rt, $rs, $pos, $sz"),
[(set RC:$rt, (MipsExt RC:$rs, imm:$pos, imm:$sz))], NoItinerary> {
bits<5> pos;

@@ -1133,7 +1133,7 @@ defm : SetgeImmPats<CPURegs, SLTi, SLTiu>;
def : Pat<(MipsDynAlloc addr:$f), (DynAlloc addr:$f)>;

// bswap pattern
def : Pat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>;

//===----------------------------------------------------------------------===//
// Floating Point Support

@@ -89,7 +89,7 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,
default:
llvm_unreachable("<unknown operand type>");
}

const MCSymbolRefExpr *MCSym = MCSymbolRefExpr::Create(Symbol, Kind, Ctx);

if (!Offset)

@@ -97,7 +97,7 @@ MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO,

// Assume offset is never negative.
assert(Offset > 0);

const MCConstantExpr *OffsetExpr = MCConstantExpr::Create(Offset, Ctx);
const MCBinaryExpr *AddExpr = MCBinaryExpr::CreateAdd(MCSym, OffsetExpr, Ctx);
return MCOperand::CreateExpr(AddExpr);

@@ -148,7 +148,7 @@ void MipsMCInstLower::LowerCPRESTORE(const MachineInstr *MI,
MCInst Sw;

if (Offset >= 0x8000) {
unsigned Hi = (Offset >> 16) + ((Offset & 0x8000) != 0);
Offset &= 0xffff;
Reg = Mips::AT;

@@ -163,7 +163,7 @@ void MipsMCInstLower::LowerCPRESTORE(const MachineInstr *MI,
MCInsts[1].addOperand(MCOperand::CreateReg(Mips::AT));
MCInsts[1].addOperand(MCOperand::CreateReg(Mips::SP));
}

Sw.setOpcode(Mips::SW);
Sw.addOperand(MCOperand::CreateReg(Mips::GP));
Sw.addOperand(MCOperand::CreateReg(Reg));

@@ -172,9 +172,9 @@ void MipsMCInstLower::LowerCPRESTORE(const MachineInstr *MI,
}

MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO,
unsigned offset) const {
MachineOperandType MOTy = MO.getType();

switch (MOTy) {
default: llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:

@@ -199,7 +199,7 @@ MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO,

void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
OutMI.setOpcode(MI->getOpcode());

for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI->getOperand(i);
MCOperand MCOp = LowerOperand(MO);

@@ -210,8 +210,8 @@ void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
}

void MipsMCInstLower::LowerUnalignedLoadStore(const MachineInstr *MI,
SmallVector<MCInst,
4>& MCInsts) {
unsigned Opc = MI->getOpcode();
MCInst Instr1, Instr2, Instr3, Move;

@@ -23,7 +23,7 @@ namespace llvm {
class MachineFunction;
class Mangler;
class MipsAsmPrinter;

/// MipsMCInstLower - This class is used to lower an MachineInstr into an
// MCInst.
class LLVM_LIBRARY_VISIBILITY MipsMCInstLower {

@@ -33,10 +33,10 @@ class LLVM_LIBRARY_VISIBILITY MipsMCInstLower {
MipsAsmPrinter &AsmPrinter;
public:
MipsMCInstLower(Mangler *mang, const MachineFunction &MF,
MipsAsmPrinter &asmprinter);
void Lower(const MachineInstr *MI, MCInst &OutMI) const;
void LowerCPLOAD(const MachineInstr *MI, SmallVector<MCInst, 4>& MCInsts);
void LowerCPRESTORE(const MachineInstr *MI, SmallVector<MCInst, 4>& MCInsts);
void LowerUnalignedLoadStore(const MachineInstr *MI,
SmallVector<MCInst, 4>& MCInsts);
void LowerSETGP01(const MachineInstr *MI, SmallVector<MCInst, 4>& MCInsts);

@@ -43,7 +43,7 @@ unsigned MipsFunctionInfo::getGlobalBaseReg() {
const TargetRegisterClass *RC;
RC = ST.isABI_N64() ?
Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass;

return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC);
}

@@ -44,10 +44,10 @@ class MipsFunctionInfo : public MachineFunctionInfo {
// InArgFIRange: Range of indices of all frame objects created during call to
// LowerFormalArguments.
// OutArgFIRange: Range of indices of all frame objects created during call to
// LowerCall except for the frame object for restoring $gp.
std::pair<int, int> InArgFIRange, OutArgFIRange;
int GPFI; // Index of the frame object for restoring $gp
mutable int DynAllocFI; // Frame index of dynamically allocated stack area.
unsigned MaxCallFrameSize;

public:

@@ -63,7 +63,7 @@ public:
}
void setLastInArgFI(int FI) { InArgFIRange.second = FI; }

bool isOutArgFI(int FI) const {
return FI <= OutArgFIRange.first && FI >= OutArgFIRange.second;
}
void extendOutArgFIRange(int FirstFI, int LastFI) {

@@ -92,20 +92,20 @@ getCalleeSavedRegs(const MachineFunction *MF) const
return Mips32CalleeSavedRegs;
else if (Subtarget.isABI_N32())
return N32CalleeSavedRegs;

assert(Subtarget.isABI_N64());
return N64CalleeSavedRegs;
}

BitVector MipsRegisterInfo::
getReservedRegs(const MachineFunction &MF) const {
static const unsigned ReservedCPURegs[] = {
Mips::ZERO, Mips::AT, Mips::K0, Mips::K1,
Mips::SP, Mips::FP, Mips::RA
};

static const unsigned ReservedCPU64Regs[] = {
Mips::ZERO_64, Mips::AT_64, Mips::K0_64, Mips::K1_64,
Mips::SP_64, Mips::FP_64, Mips::RA_64
};

@@ -134,8 +134,8 @@ getReservedRegs(const MachineFunction &MF) const {
Reg != Mips::FGR64RegisterClass->end(); ++Reg)
Reserved.set(*Reg);
}

// If GP is dedicated as a global base register, reserve it.
if (MF.getInfo<MipsFunctionInfo>()->globalBaseRegFixed()) {
Reserved.set(Mips::GP);
Reserved.set(Mips::GP_64);

@@ -195,7 +195,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
// 1. Outgoing arguments.
// 2. Pointer to dynamically allocated stack space.
// 3. Locations for callee-saved registers.
// Everything else is referenced relative to whatever register
// getFrameRegister() returns.
unsigned FrameReg;

@@ -203,15 +203,15 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
(FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI))
FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP;
else
FrameReg = getFrameRegister(MF);

// Calculate final offset.
// - There is no need to change the offset if the frame object is one of the
// following: an outgoing argument, pointer to a dynamically allocated
// stack space or a $gp restore location,
// - If the frame object is any of the following, its offset must be adjusted
// by adding the size of the stack:
// incoming argument, callee-saved register location or local variable.
int64_t Offset;

if (MipsFI->isOutArgFI(FrameIndex) || MipsFI->isGPFI(FrameIndex) ||

@@ -225,7 +225,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n");

// If MI is not a debug value, make sure Offset fits in the 16-bit immediate
// field.
if (!MI.isDebugValue() && !isInt<16>(Offset)) {
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc DL = II->getDebugLoc();

@@ -233,7 +233,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
unsigned Size = Subtarget.isABI_N64() ? 64 : 32;
unsigned LUi = Subtarget.isABI_N64() ? Mips::LUi64 : Mips::LUi;
unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu;
unsigned ZEROReg = Subtarget.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
unsigned ATReg = Subtarget.isABI_N64() ? Mips::AT_64 : Mips::AT;
const MipsAnalyzeImmediate::InstSeq &Seq =
AnalyzeImm.Analyze(Offset, Size, true /* LastInstrIsADDiu */);

@@ -1,4 +1,4 @@
-//===- MipsRegisterInfo.h - Mips Register Information Impl ------*- C++ -*-===//
+//===-- MipsRegisterInfo.h - Mips Register Information Impl -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//

@@ -1,16 +1,16 @@
-//===- MipsRelocations.h - Mips Code Relocations ---------------*- C++ -*-===//
+//===-- MipsRelocations.h - Mips Code Relocations ---------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
-//===---------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//
//
// This file defines the Mips target-specific relocation types
// (for relocation-model=static).
//
-//===---------------------------------------------------------------------===//
+//===----------------------------------------------------------------------===//

#ifndef MIPSRELOCATIONS_H_
#define MIPSRELOCATIONS_H_

@@ -1,4 +1,4 @@
-//===- MipsSchedule.td - Mips Scheduling Definitions -------*- tablegen -*-===//
+//===-- MipsSchedule.td - Mips Scheduling Definitions ------*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//

@@ -26,7 +26,7 @@ void MipsSubtarget::anchor() { }
MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
const std::string &FS, bool little) :
MipsGenSubtargetInfo(TT, CPU, FS),
MipsArchVersion(Mips32), MipsABI(UnknownABI), IsLittle(little),
IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false),
IsLinux(true), HasSEInReg(false), HasCondMov(false), HasMulDivAdd(false),
HasMinMax(false), HasSwap(false), HasBitCount(false)

@@ -43,7 +43,7 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,

// Set MipsABI if it hasn't been set yet.
if (MipsABI == UnknownABI)
MipsABI = hasMips64() ? N64 : O32;

// Check if Architecture and ABI are compatible.
assert(((!hasMips64() && (isABI_O32() || isABI_EABI())) ||

@@ -1,4 +1,4 @@
-//=====-- MipsSubtarget.h - Define Subtarget for the Mips -----*- C++ -*--====//
+//===-- MipsSubtarget.h - Define Subtarget for the Mips ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//