2012-02-18 20:03:15 +08:00
|
|
|
//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
|
2009-07-09 00:09:28 +08:00
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This file contains the Base ARM implementation of the TargetInstrInfo class.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#include "ARMBaseInstrInfo.h"
|
|
|
|
#include "ARM.h"
|
2009-11-07 12:04:34 +08:00
|
|
|
#include "ARMConstantPoolValue.h"
|
2010-12-06 06:04:16 +08:00
|
|
|
#include "ARMHazardRecognizer.h"
|
2009-07-09 00:09:28 +08:00
|
|
|
#include "ARMMachineFunctionInfo.h"
|
2009-11-02 08:10:38 +08:00
|
|
|
#include "ARMRegisterInfo.h"
|
2011-07-21 07:34:39 +08:00
|
|
|
#include "MCTargetDesc/ARMAddressingModes.h"
|
2009-11-08 08:15:23 +08:00
|
|
|
#include "llvm/Constants.h"
|
|
|
|
#include "llvm/Function.h"
|
|
|
|
#include "llvm/GlobalValue.h"
|
2009-07-09 00:09:28 +08:00
|
|
|
#include "llvm/CodeGen/LiveVariables.h"
|
2009-11-07 12:04:34 +08:00
|
|
|
#include "llvm/CodeGen/MachineConstantPool.h"
|
2009-07-09 00:09:28 +08:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
|
|
|
#include "llvm/CodeGen/MachineJumpTableInfo.h"
|
2009-10-07 08:06:35 +08:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
2010-05-22 09:47:14 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2011-07-21 07:34:39 +08:00
|
|
|
#include "llvm/CodeGen/SelectionDAGNodes.h"
|
2009-08-23 04:48:53 +08:00
|
|
|
#include "llvm/MC/MCAsmInfo.h"
|
2011-07-10 10:58:07 +08:00
|
|
|
#include "llvm/Support/BranchProbability.h"
|
2009-07-09 00:09:28 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2009-11-02 08:10:38 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
2009-07-12 04:10:48 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
When we look at instructions to convert to setting the 's' flag, we need to look
at more than those which define CPSR. You can have this situation:
(1) subs ...
(2) sub r6, r5, r4
(3) movge ...
(4) cmp r6, 0
(5) movge ...
We cannot convert (2) to "subs" because (3) is using the CPSR set by
(1). There's an analogous situation here:
(1) sub r1, r2, r3
(2) sub r4, r5, r6
(3) cmp r4, ...
(5) movge ...
(6) cmp r1, ...
(7) movge ...
We cannot convert (1) to "subs" because of the intervening use of CPSR.
llvm-svn: 117950
2010-11-02 04:41:43 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2011-06-29 04:07:07 +08:00
|
|
|
|
2011-07-02 01:57:27 +08:00
|
|
|
#define GET_INSTRINFO_CTOR
|
2011-06-29 04:07:07 +08:00
|
|
|
#include "ARMGenInstrInfo.inc"
|
|
|
|
|
2009-07-09 00:09:28 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
/// Hidden command-line switch: when set, split indexed (2-address)
/// load/store instructions into an un-indexed memory op plus an explicit
/// add/sub. Off by default.
static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

/// Hidden command-line switch: allow widening a VMOVS copy into a VMOVD
/// when profitable. On by default.
static cl::opt<bool>
WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
           cl::desc("Widen ARM vmovs to vmovd when possible"));
|
|
|
|
|
2010-12-06 06:04:16 +08:00
|
|
|
/// ARM_MLxEntry - Record information about MLA / MLS instructions.
/// Each entry maps a fused multiply-accumulate opcode to the pair of
/// opcodes it expands to (a multiply plus an add/sub).
struct ARM_MLxEntry {
  unsigned MLxOpc;    // MLA / MLS opcode
  unsigned MulOpc;    // Expanded multiplication opcode
  unsigned AddSubOpc; // Expanded add / sub opcode
  bool NegAcc;        // True if the acc is negated before the add / sub.
  bool HasLane;       // True if instruction has an extra "lane" operand.
};
|
|
|
|
|
|
|
|
/// Table of all MLA / MLS opcodes the hazard recognizer cares about,
/// together with their two-instruction expansions.
static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,          MulOpc,           AddSubOpc,    NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,       ARM::VMULS,       ARM::VADDS,   false,  false },
  { ARM::VMLSS,       ARM::VMULS,       ARM::VSUBS,   false,  false },
  { ARM::VMLAD,       ARM::VMULD,       ARM::VADDD,   false,  false },
  { ARM::VMLSD,       ARM::VMULD,       ARM::VSUBD,   false,  false },
  { ARM::VNMLAS,      ARM::VNMULS,      ARM::VSUBS,   true,   false },
  { ARM::VNMLSS,      ARM::VMULS,       ARM::VSUBS,   true,   false },
  { ARM::VNMLAD,      ARM::VNMULD,      ARM::VSUBD,   true,   false },
  { ARM::VNMLSD,      ARM::VMULD,       ARM::VSUBD,   true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,      ARM::VMULfd,      ARM::VADDfd,  false,  false },
  { ARM::VMLSfd,      ARM::VMULfd,      ARM::VSUBfd,  false,  false },
  { ARM::VMLAfq,      ARM::VMULfq,      ARM::VADDfq,  false,  false },
  { ARM::VMLSfq,      ARM::VMULfq,      ARM::VSUBfq,  false,  false },
  { ARM::VMLAslfd,    ARM::VMULslfd,    ARM::VADDfd,  false,  true  },
  { ARM::VMLSslfd,    ARM::VMULslfd,    ARM::VSUBfd,  false,  true  },
  { ARM::VMLAslfq,    ARM::VMULslfq,    ARM::VADDfq,  false,  true  },
  { ARM::VMLSslfq,    ARM::VMULslfq,    ARM::VSUBfq,  false,  true  },
};
|
|
|
|
|
2009-11-02 08:10:38 +08:00
|
|
|
/// Construct the ARM base instruction info and build the MLx lookup
/// structures used by the hazard recognizer: a map from each MLA/MLS
/// opcode to its ARM_MLxTable index, and the set of expansion opcodes
/// that can participate in an MLx hazard.
ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    // The insert must happen in all build modes; only the duplicate check
    // is debug-only. The original wrapped the assert in an empty-bodied
    // `if`, which leaves a dead branch in NDEBUG builds.
    bool Inserted =
      MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second;
    (void)Inserted;
    assert(Inserted && "Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}
|
|
|
|
|
Various bits of framework needed for precise machine-level selection
DAG scheduling during isel. Most new functionality is currently
guarded by -enable-sched-cycles and -enable-sched-hazard.
Added InstrItineraryData::IssueWidth field, currently derived from
ARM itineraries, but could be initialized differently on other targets.
Added ScheduleHazardRecognizer::MaxLookAhead to indicate whether it is
active, and if so how many cycles of state it holds.
Added SchedulingPriorityQueue::HasReadyFilter to allow gating entry
into the scheduler's available queue.
ScoreboardHazardRecognizer now accesses the ScheduleDAG in order to
get information about its SUnits, provides RecedeCycle for bottom-up
scheduling, correctly computes scoreboard depth, tracks IssueCount, and
considers potential stall cycles when checking for hazards.
ScheduleDAGRRList now models machine cycles and hazards (under
flags). It tracks MinAvailableCycle, drives the hazard recognizer and
priority queue's ready filter, manages a new PendingQueue, properly
accounts for stall cycles, etc.
llvm-svn: 122541
2010-12-24 13:03:26 +08:00
|
|
|
// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
|
|
|
|
// currently defaults to no prepass hazard recognizer.
|
2010-12-06 06:04:16 +08:00
|
|
|
ScheduleHazardRecognizer *ARMBaseInstrInfo::
|
Various bits of framework needed for precise machine-level selection
DAG scheduling during isel. Most new functionality is currently
guarded by -enable-sched-cycles and -enable-sched-hazard.
Added InstrItineraryData::IssueWidth field, currently derived from
ARM itineraries, but could be initialized differently on other targets.
Added ScheduleHazardRecognizer::MaxLookAhead to indicate whether it is
active, and if so how many cycles of state it holds.
Added SchedulingPriorityQueue::HasReadyFilter to allowing gating entry
into the scheduler's available queue.
ScoreboardHazardRecognizer now accesses the ScheduleDAG in order to
get information about it's SUnits, provides RecedeCycle for bottom-up
scheduling, correctly computes scoreboard depth, tracks IssueCount, and
considers potential stall cycles when checking for hazards.
ScheduleDAGRRList now models machine cycles and hazards (under
flags). It tracks MinAvailableCycle, drives the hazard recognizer and
priority queue's ready filter, manages a new PendingQueue, properly
accounts for stall cycles, etc.
llvm-svn: 122541
2010-12-24 13:03:26 +08:00
|
|
|
CreateTargetHazardRecognizer(const TargetMachine *TM,
|
|
|
|
const ScheduleDAG *DAG) const {
|
2011-01-21 13:51:33 +08:00
|
|
|
if (usePreRAHazardRecognizer()) {
|
Various bits of framework needed for precise machine-level selection
DAG scheduling during isel. Most new functionality is currently
guarded by -enable-sched-cycles and -enable-sched-hazard.
Added InstrItineraryData::IssueWidth field, currently derived from
ARM itineraries, but could be initialized differently on other targets.
Added ScheduleHazardRecognizer::MaxLookAhead to indicate whether it is
active, and if so how many cycles of state it holds.
Added SchedulingPriorityQueue::HasReadyFilter to allowing gating entry
into the scheduler's available queue.
ScoreboardHazardRecognizer now accesses the ScheduleDAG in order to
get information about it's SUnits, provides RecedeCycle for bottom-up
scheduling, correctly computes scoreboard depth, tracks IssueCount, and
considers potential stall cycles when checking for hazards.
ScheduleDAGRRList now models machine cycles and hazards (under
flags). It tracks MinAvailableCycle, drives the hazard recognizer and
priority queue's ready filter, manages a new PendingQueue, properly
accounts for stall cycles, etc.
llvm-svn: 122541
2010-12-24 13:03:26 +08:00
|
|
|
const InstrItineraryData *II = TM->getInstrItineraryData();
|
|
|
|
return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
|
|
|
|
}
|
|
|
|
return TargetInstrInfoImpl::CreateTargetHazardRecognizer(TM, DAG);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Create the post-RA hazard recognizer. Thumb2 / VFP subtargets get the
/// ARM-specific recognizer (which knows about MLx hazards); everything
/// else falls back to the generic implementation.
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2())
    return (ScheduleHazardRecognizer *)
      new ARMHazardRecognizer(II, *this, getRegisterInfo(), Subtarget, DAG);
  return TargetInstrInfoImpl::CreateTargetPostRAHazardRecognizer(II, DAG);
}
|
|
|
|
|
|
|
|
/// Split a pre/post-indexed load or store into an un-indexed memory
/// operation plus an explicit add/sub of the base register. Returns the
/// first of the two new instructions (already inserted into the block),
/// or NULL if the instruction is not convertible or conversion is
/// disabled. Kill/dead info is transferred via LV when provided.
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.
  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  uint64_t TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;           // Not an indexed load/store.
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI->mayStore();
  // Operand layout: loads are (dst, writeback, base, ...); stores are
  // (writeback, src, base, ...). The offset/imm/pred trail the list.
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();

  // Build the add/sub that performs the base-register update.
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      // Register offset with shift.
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      // Plain register offset.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  // Build the un-indexed memory op; ordering depends on pre vs post
  // indexing (pre: memory op uses the updated base, so it comes first in
  // NewMIs, which is consumed in reverse below).
  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  // Insert in program order (NewMIs holds them in reverse).
  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}
|
|
|
|
|
|
|
|
// Branch analysis.
|
|
|
|
bool
|
|
|
|
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
|
|
|
|
MachineBasicBlock *&FBB,
|
|
|
|
SmallVectorImpl<MachineOperand> &Cond,
|
|
|
|
bool AllowModify) const {
|
|
|
|
// If the block has no terminators, it just falls into the block after it.
|
|
|
|
MachineBasicBlock::iterator I = MBB.end();
|
2010-04-02 09:38:09 +08:00
|
|
|
if (I == MBB.begin())
|
|
|
|
return false;
|
|
|
|
--I;
|
|
|
|
while (I->isDebugValue()) {
|
|
|
|
if (I == MBB.begin())
|
|
|
|
return false;
|
|
|
|
--I;
|
|
|
|
}
|
|
|
|
if (!isUnpredicatedTerminator(I))
|
2009-07-09 00:09:28 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Get the last instruction in the block.
|
|
|
|
MachineInstr *LastInst = I;
|
|
|
|
|
|
|
|
// If there is only one terminator instruction, process it.
|
|
|
|
unsigned LastOpc = LastInst->getOpcode();
|
|
|
|
if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
|
2009-07-28 02:20:05 +08:00
|
|
|
if (isUncondBranchOpcode(LastOpc)) {
|
2009-07-09 00:09:28 +08:00
|
|
|
TBB = LastInst->getOperand(0).getMBB();
|
|
|
|
return false;
|
|
|
|
}
|
2009-07-28 02:20:05 +08:00
|
|
|
if (isCondBranchOpcode(LastOpc)) {
|
2009-07-09 00:09:28 +08:00
|
|
|
// Block ends with fall-through condbranch.
|
|
|
|
TBB = LastInst->getOperand(0).getMBB();
|
|
|
|
Cond.push_back(LastInst->getOperand(1));
|
|
|
|
Cond.push_back(LastInst->getOperand(2));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true; // Can't handle indirect branch.
|
|
|
|
}
|
|
|
|
|
|
|
|
// Get the instruction before it if it is a terminator.
|
|
|
|
MachineInstr *SecondLastInst = I;
|
2010-09-23 14:54:40 +08:00
|
|
|
unsigned SecondLastOpc = SecondLastInst->getOpcode();
|
|
|
|
|
|
|
|
// If AllowModify is true and the block ends with two or more unconditional
|
|
|
|
// branches, delete all but the first unconditional branch.
|
|
|
|
if (AllowModify && isUncondBranchOpcode(LastOpc)) {
|
|
|
|
while (isUncondBranchOpcode(SecondLastOpc)) {
|
|
|
|
LastInst->eraseFromParent();
|
|
|
|
LastInst = SecondLastInst;
|
|
|
|
LastOpc = LastInst->getOpcode();
|
2010-09-24 03:42:03 +08:00
|
|
|
if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
|
|
|
|
// Return now the only terminator is an unconditional branch.
|
|
|
|
TBB = LastInst->getOperand(0).getMBB();
|
|
|
|
return false;
|
|
|
|
} else {
|
2010-09-23 14:54:40 +08:00
|
|
|
SecondLastInst = I;
|
|
|
|
SecondLastOpc = SecondLastInst->getOpcode();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2009-07-09 00:09:28 +08:00
|
|
|
|
|
|
|
// If there are three terminators, we don't know what sort of block this is.
|
|
|
|
if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
|
|
|
|
return true;
|
|
|
|
|
2009-07-28 02:20:05 +08:00
|
|
|
// If the block ends with a B and a Bcc, handle it.
|
|
|
|
if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
|
2009-07-09 00:09:28 +08:00
|
|
|
TBB = SecondLastInst->getOperand(0).getMBB();
|
|
|
|
Cond.push_back(SecondLastInst->getOperand(1));
|
|
|
|
Cond.push_back(SecondLastInst->getOperand(2));
|
|
|
|
FBB = LastInst->getOperand(0).getMBB();
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the block ends with two unconditional branches, handle it. The second
|
|
|
|
// one is not executed, so remove it.
|
2009-07-28 02:20:05 +08:00
|
|
|
if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
|
2009-07-09 00:09:28 +08:00
|
|
|
TBB = SecondLastInst->getOperand(0).getMBB();
|
|
|
|
I = LastInst;
|
|
|
|
if (AllowModify)
|
|
|
|
I->eraseFromParent();
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// ...likewise if it ends with a branch table followed by an unconditional
|
|
|
|
// branch. The branch folder can create these, and we must get rid of them for
|
|
|
|
// correctness of Thumb constant islands.
|
2009-10-29 02:26:41 +08:00
|
|
|
if ((isJumpTableBranchOpcode(SecondLastOpc) ||
|
|
|
|
isIndirectBranchOpcode(SecondLastOpc)) &&
|
2009-07-28 02:20:05 +08:00
|
|
|
isUncondBranchOpcode(LastOpc)) {
|
2009-07-09 00:09:28 +08:00
|
|
|
I = LastInst;
|
|
|
|
if (AllowModify)
|
|
|
|
I->eraseFromParent();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, can't handle this.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/// Delete the branch instructions at the end of MBB and return how many
/// were removed (0, 1, or 2 — an optional conditional branch may precede
/// the final unconditional one).
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  // Skip trailing debug values.
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
|
|
|
|
|
|
|
|
unsigned
|
|
|
|
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
|
2010-06-18 06:43:56 +08:00
|
|
|
MachineBasicBlock *FBB,
|
|
|
|
const SmallVectorImpl<MachineOperand> &Cond,
|
|
|
|
DebugLoc DL) const {
|
2009-07-28 13:48:47 +08:00
|
|
|
ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
|
|
|
|
int BOpc = !AFI->isThumbFunction()
|
|
|
|
? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
|
|
|
|
int BccOpc = !AFI->isThumbFunction()
|
|
|
|
? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
|
2011-09-10 05:48:23 +08:00
|
|
|
bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();
|
2011-09-21 10:17:37 +08:00
|
|
|
|
2009-07-09 00:09:28 +08:00
|
|
|
// Shouldn't be a fall through.
|
|
|
|
assert(TBB && "InsertBranch must not be told to insert a fallthrough");
|
|
|
|
assert((Cond.size() == 2 || Cond.size() == 0) &&
|
|
|
|
"ARM branch conditions have two components!");
|
|
|
|
|
|
|
|
if (FBB == 0) {
|
2011-09-10 07:13:02 +08:00
|
|
|
if (Cond.empty()) { // Unconditional branch?
|
2011-09-10 05:48:23 +08:00
|
|
|
if (isThumb)
|
|
|
|
BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0);
|
|
|
|
else
|
|
|
|
BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
|
2011-09-10 07:13:02 +08:00
|
|
|
} else
|
2010-06-18 06:43:56 +08:00
|
|
|
BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
|
2009-07-09 00:09:28 +08:00
|
|
|
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Two-way conditional branch.
|
2010-06-18 06:43:56 +08:00
|
|
|
BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
|
2009-07-09 00:09:28 +08:00
|
|
|
.addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
|
2011-09-10 05:48:23 +08:00
|
|
|
if (isThumb)
|
|
|
|
BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0);
|
|
|
|
else
|
|
|
|
BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
|
2009-07-09 00:09:28 +08:00
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
|
|
|
|
ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
|
|
|
|
Cond[0].setImm(ARMCC::getOppositeCondition(CC));
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2011-12-14 10:11:42 +08:00
|
|
|
bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
|
|
|
|
if (MI->isBundle()) {
|
|
|
|
MachineBasicBlock::const_instr_iterator I = MI;
|
|
|
|
MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
|
|
|
|
while (++I != E && I->isInsideBundle()) {
|
|
|
|
int PIdx = I->findFirstPredOperandIdx();
|
|
|
|
if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
int PIdx = MI->findFirstPredOperandIdx();
|
|
|
|
return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
|
|
|
|
}
|
|
|
|
|
2009-07-09 00:09:28 +08:00
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
PredicateInstruction(MachineInstr *MI,
|
|
|
|
const SmallVectorImpl<MachineOperand> &Pred) const {
|
|
|
|
unsigned Opc = MI->getOpcode();
|
2009-07-28 02:20:05 +08:00
|
|
|
if (isUncondBranchOpcode(Opc)) {
|
|
|
|
MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
|
2009-07-09 00:09:28 +08:00
|
|
|
MI->addOperand(MachineOperand::CreateImm(Pred[0].getImm()));
|
|
|
|
MI->addOperand(MachineOperand::CreateReg(Pred[1].getReg(), false));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
int PIdx = MI->findFirstPredOperandIdx();
|
|
|
|
if (PIdx != -1) {
|
|
|
|
MachineOperand &PMO = MI->getOperand(PIdx);
|
|
|
|
PMO.setImm(Pred[0].getImm());
|
|
|
|
MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
|
|
|
|
const SmallVectorImpl<MachineOperand> &Pred2) const {
|
|
|
|
if (Pred1.size() > 2 || Pred2.size() > 2)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
|
|
|
|
ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
|
|
|
|
if (CC1 == CC2)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
switch (CC1) {
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
case ARMCC::AL:
|
|
|
|
return true;
|
|
|
|
case ARMCC::HS:
|
|
|
|
return CC2 == ARMCC::HI;
|
|
|
|
case ARMCC::LS:
|
|
|
|
return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
|
|
|
|
case ARMCC::GE:
|
|
|
|
return CC2 == ARMCC::GT;
|
|
|
|
case ARMCC::LE:
|
|
|
|
return CC2 == ARMCC::LT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Collect every operand of MI that defines (or clobbers via a regmask)
/// CPSR into Pred. Returns true if any such operand was found.
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
        (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}
|
|
|
|
|
2009-11-21 14:21:52 +08:00
|
|
|
/// isPredicable - Return true if the specified instruction can be predicated.
|
|
|
|
/// By default, this returns true for every instruction with a
|
|
|
|
/// PredicateOperand.
|
|
|
|
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
|
2011-12-07 15:15:52 +08:00
|
|
|
if (!MI->isPredicable())
|
2009-11-21 14:21:52 +08:00
|
|
|
return false;
|
|
|
|
|
2011-12-07 15:15:52 +08:00
|
|
|
if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON) {
|
2009-11-21 14:21:52 +08:00
|
|
|
ARMFunctionInfo *AFI =
|
|
|
|
MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();
|
2009-11-24 16:06:15 +08:00
|
|
|
return AFI->isThumb2Function();
|
2009-11-21 14:21:52 +08:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
2009-07-09 00:09:28 +08:00
|
|
|
|
2009-12-03 14:58:32 +08:00
|
|
|
/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
|
2010-10-23 16:40:19 +08:00
|
|
|
LLVM_ATTRIBUTE_NOINLINE
|
2009-07-09 00:09:28 +08:00
|
|
|
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
|
2009-12-03 14:58:32 +08:00
|
|
|
unsigned JTI);
|
2009-07-09 00:09:28 +08:00
|
|
|
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
|
|
|
|
unsigned JTI) {
|
2009-12-03 14:58:32 +08:00
|
|
|
assert(JTI < JT.size());
|
2009-07-09 00:09:28 +08:00
|
|
|
return JT[JTI].MBBs.size();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// GetInstSize - Return the size of the specified MachineInstr.
|
|
|
|
///
|
|
|
|
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
|
|
|
|
const MachineBasicBlock &MBB = *MI->getParent();
|
|
|
|
const MachineFunction *MF = MBB.getParent();
|
2009-08-23 05:43:10 +08:00
|
|
|
const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
|
2009-07-09 00:09:28 +08:00
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &MCID = MI->getDesc();
|
2011-07-14 07:22:26 +08:00
|
|
|
if (MCID.getSize())
|
|
|
|
return MCID.getSize();
|
2009-07-09 00:09:28 +08:00
|
|
|
|
2012-01-21 05:51:11 +08:00
|
|
|
// If this machine instr is an inline asm, measure it.
|
|
|
|
if (MI->getOpcode() == ARM::INLINEASM)
|
|
|
|
return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
|
|
|
|
if (MI->isLabel())
|
|
|
|
return 0;
|
|
|
|
unsigned Opc = MI->getOpcode();
|
|
|
|
switch (Opc) {
|
|
|
|
case TargetOpcode::IMPLICIT_DEF:
|
|
|
|
case TargetOpcode::KILL:
|
|
|
|
case TargetOpcode::PROLOG_LABEL:
|
|
|
|
case TargetOpcode::EH_LABEL:
|
|
|
|
case TargetOpcode::DBG_VALUE:
|
|
|
|
return 0;
|
|
|
|
case TargetOpcode::BUNDLE:
|
|
|
|
return getInstBundleLength(MI);
|
|
|
|
case ARM::MOVi16_ga_pcrel:
|
|
|
|
case ARM::MOVTi16_ga_pcrel:
|
|
|
|
case ARM::t2MOVi16_ga_pcrel:
|
|
|
|
case ARM::t2MOVTi16_ga_pcrel:
|
|
|
|
return 4;
|
|
|
|
case ARM::MOVi32imm:
|
|
|
|
case ARM::t2MOVi32imm:
|
|
|
|
return 8;
|
|
|
|
case ARM::CONSTPOOL_ENTRY:
|
|
|
|
// If this machine instr is a constant pool entry, its size is recorded as
|
|
|
|
// operand #2.
|
|
|
|
return MI->getOperand(2).getImm();
|
|
|
|
case ARM::Int_eh_sjlj_longjmp:
|
|
|
|
return 16;
|
|
|
|
case ARM::tInt_eh_sjlj_longjmp:
|
|
|
|
return 10;
|
|
|
|
case ARM::Int_eh_sjlj_setjmp:
|
|
|
|
case ARM::Int_eh_sjlj_setjmp_nofp:
|
|
|
|
return 20;
|
|
|
|
case ARM::tInt_eh_sjlj_setjmp:
|
|
|
|
case ARM::t2Int_eh_sjlj_setjmp:
|
|
|
|
case ARM::t2Int_eh_sjlj_setjmp_nofp:
|
|
|
|
return 12;
|
|
|
|
case ARM::BR_JTr:
|
|
|
|
case ARM::BR_JTm:
|
|
|
|
case ARM::BR_JTadd:
|
|
|
|
case ARM::tBR_JTr:
|
|
|
|
case ARM::t2BR_JT:
|
|
|
|
case ARM::t2TBB_JT:
|
|
|
|
case ARM::t2TBH_JT: {
|
|
|
|
// These are jumptable branches, i.e. a branch followed by an inlined
|
|
|
|
// jumptable. The size is 4 + 4 * number of entries. For TBB, each
|
|
|
|
// entry is one byte; TBH two byte each.
|
|
|
|
unsigned EntrySize = (Opc == ARM::t2TBB_JT)
|
|
|
|
? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
|
|
|
|
unsigned NumOps = MCID.getNumOperands();
|
|
|
|
MachineOperand JTOP =
|
|
|
|
MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
|
|
|
|
unsigned JTI = JTOP.getIndex();
|
|
|
|
const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
|
|
|
|
assert(MJTI != 0);
|
|
|
|
const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
|
|
|
|
assert(JTI < JT.size());
|
|
|
|
// Thumb instructions are 2 byte aligned, but JT entries are 4 byte
|
|
|
|
// 4 aligned. The assembler / linker may add 2 byte padding just before
|
|
|
|
// the JT entries. The size does not include this padding; the
|
|
|
|
// constant islands pass does separate bookkeeping for it.
|
|
|
|
// FIXME: If we know the size of the function is less than (1 << 16) *2
|
|
|
|
// bytes, we can use 16-bit entries instead. Then there won't be an
|
|
|
|
// alignment issue.
|
|
|
|
unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
|
|
|
|
unsigned NumEntries = getNumJTEntries(JT, JTI);
|
|
|
|
if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
|
|
|
|
// Make sure the instruction that follows TBB is 2-byte aligned.
|
|
|
|
// FIXME: Constant island pass should insert an "ALIGN" instruction
|
|
|
|
// instead.
|
|
|
|
++NumEntries;
|
|
|
|
return NumEntries * EntrySize + InstSize;
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
// Otherwise, pseudo-instruction sizes are zero.
|
|
|
|
return 0;
|
|
|
|
}
|
2009-07-09 00:09:28 +08:00
|
|
|
}
|
|
|
|
|
2011-12-14 10:11:42 +08:00
|
|
|
unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const {
|
|
|
|
unsigned Size = 0;
|
|
|
|
MachineBasicBlock::const_instr_iterator I = MI;
|
|
|
|
MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
|
|
|
|
while (++I != E && I->isInsideBundle()) {
|
|
|
|
assert(!I->isBundle() && "No nested bundle!");
|
|
|
|
Size += GetInstSizeInBytes(&*I);
|
|
|
|
}
|
|
|
|
return Size;
|
|
|
|
}
|
|
|
|
|
2010-07-11 14:33:54 +08:00
|
|
|
/// copyPhysReg - Emit a register-to-register copy from SrcReg to DestReg.
/// The opcode is chosen from the register classes of the two physical
/// registers: GPR->GPR uses MOVr; SPR<->SPR/GPR use VMOVS/VMOVRS/VMOVSR;
/// DPR uses VMOVD; QPR uses VORRq (which reads the source operand twice).
/// QQPR and QQQQPR pseudo copies are expanded in place into one VORRq per
/// Q sub-register.
void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    // Integer copy: predicable MOVr with the default (unset) CPSR operand.
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                .addReg(SrcReg, getKillRegState(KillSrc))));
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  // Select a single-instruction VFP/NEON copy opcode, if one applies.
  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    // VORRq is a two-input instruction (dst = src | src), so the source
    // register must be added a second time.
    if (Opc == ARM::VORRq)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    AddDefaultPred(MIB);
    return;
  }

  // Generate instructions for VMOVQQ and VMOVQQQQ pseudos in place.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg) ||
      ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    assert(ARM::qsub_0 + 3 == ARM::qsub_3 && "Expected contiguous enum.");
    // Copy qsub_0..qsub_1 for a QQ pair, qsub_0..qsub_3 for a QQQQ quad.
    unsigned EndSubReg = ARM::QQPRRegClass.contains(DestReg, SrcReg) ?
      ARM::qsub_1 : ARM::qsub_3;
    for (unsigned i = ARM::qsub_0, e = EndSubReg + 1; i != e; ++i) {
      unsigned Dst = TRI->getSubReg(DestReg, i);
      unsigned Src = TRI->getSubReg(SrcReg, i);
      MachineInstrBuilder Mov =
        AddDefaultPred(BuildMI(MBB, I, I->getDebugLoc(), get(ARM::VORRq))
                       .addReg(Dst, RegState::Define)
                       .addReg(Src, getKillRegState(KillSrc))
                       .addReg(Src, getKillRegState(KillSrc)));
      // Only the last sub-register copy carries the liveness bookkeeping for
      // the whole super-register: it implicitly defines DestReg and, when
      // requested, kills SrcReg.
      if (i == EndSubReg) {
        Mov->addRegisterDefined(DestReg, TRI);
        if (KillSrc)
          Mov->addRegisterKilled(SrcReg, TRI);
      }
    }
    return;
  }
  llvm_unreachable("Impossible reg-to-reg copy");
}
|
|
|
|
|
2010-05-07 08:24:52 +08:00
|
|
|
static const
|
|
|
|
MachineInstrBuilder &AddDReg(MachineInstrBuilder &MIB,
|
|
|
|
unsigned Reg, unsigned SubIdx, unsigned State,
|
|
|
|
const TargetRegisterInfo *TRI) {
|
|
|
|
if (!SubIdx)
|
|
|
|
return MIB.addReg(Reg, State);
|
|
|
|
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(Reg))
|
|
|
|
return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
|
|
|
|
return MIB.addReg(Reg, State, SubIdx);
|
|
|
|
}
|
|
|
|
|
2009-07-09 00:09:28 +08:00
|
|
|
/// storeRegToStackSlot - Emit instructions to spill SrcReg into stack slot
/// FI. The opcode is selected by the spill size of the register class:
///   4 bytes  - STRi12 (GPR) or VSTRS (SPR)
///   8 bytes  - VSTRD (DPR)
///   16 bytes - VST1q64Pseudo when the slot is 16-byte aligned or the stack
///              can be realigned, otherwise VSTMQIA
///   32 bytes - VST1d64QPseudo (aligned) or VSTMDIA of four D sub-registers
///   64 bytes - VSTMDIA of all eight D sub-registers
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  // Attach a memory operand describing the fixed-stack store.
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOStore,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::QPRRegClass.hasSubClassEq(RC)) {
      // Use aligned spills if the stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        // NOTE: VST1q64Pseudo takes the address operands first, then the
        // value, unlike the VSTR/VSTM forms above.
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64Pseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        // FIXME: It's possible to only store part of the QQ register if the
        // spilled def has a sub-register index.
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo))
                       .addFrameIndex(FI).addImm(16)
                       .addReg(SrcReg, getKillRegState(isKill))
                       .addMemOperand(MMO));
      } else {
        // Store the QQ register as its four D sub-registers; only the first
        // operand carries the kill flag.
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
              AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      // Store the QQQQ register as its eight D sub-registers.
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
      MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
            AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown reg class!");
  }
}
|
|
|
|
|
2010-09-16 00:36:26 +08:00
|
|
|
/// isStoreToStackSlot - If MI is a direct, unindexed store of a register to
/// a stack slot, set FrameIndex and return the stored register; return 0 for
/// anything this pattern matcher does not recognize.
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    // Register-offset forms only qualify when both the offset register and
    // the shift immediate are zero, i.e. the access is exactly [FI].
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
    // Immediate-offset forms qualify only with a zero offset.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64Pseudo:
    // For VST1q64Pseudo the frame index is operand 0 and the stored value is
    // operand 2; a sub-register store does not count as a full-slot store.
    if (MI->getOperand(0).isFI() &&
        MI->getOperand(2).getSubReg() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}
|
|
|
|
|
2011-08-09 05:45:32 +08:00
|
|
|
unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
|
|
|
|
int &FrameIndex) const {
|
|
|
|
const MachineMemOperand *Dummy;
|
2011-12-07 15:15:52 +08:00
|
|
|
return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
|
2011-08-09 05:45:32 +08:00
|
|
|
}
|
|
|
|
|
2009-07-09 00:09:28 +08:00
|
|
|
/// loadRegFromStackSlot - Emit instructions to reload DestReg from stack
/// slot FI. Mirrors storeRegToStackSlot: the opcode is picked by the spill
/// size of the register class (LDRi12/VLDRS, VLDRD, VLD1q64Pseudo/VLDMQIA,
/// VLD1d64QPseudo/VLDMDIA, VLDMDIA of eight D sub-registers).
void ARMBaseInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  // Attach a memory operand describing the fixed-stack load.
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOLoad,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
  case 4:
    if (ARM::GPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));

    } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 8:
    if (ARM::DPRRegClass.hasSubClassEq(RC)) {
      AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
                     .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 16:
    if (ARM::QPRRegClass.hasSubClassEq(RC)) {
      // Use the aligned VLD1 form when the slot is 16-byte aligned or the
      // stack can be realigned.
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64Pseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
                       .addFrameIndex(FI)
                       .addMemOperand(MMO));
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 32:
    if (ARM::QQPRRegClass.hasSubClassEq(RC)) {
      if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
                       .addFrameIndex(FI).addImm(16)
                       .addMemOperand(MMO));
      } else {
        // Load the four D sub-registers, then implicitly define the full
        // QQ register so liveness covers the whole super-register.
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
        MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
        MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
      }
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  case 64:
    if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
      // Load all eight D sub-registers plus an implicit def of the QQQQ reg.
      MachineInstrBuilder MIB =
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::Define, TRI);
      MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::Define, TRI);
      MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
    } else
      llvm_unreachable("Unknown reg class!");
    break;
  default:
    llvm_unreachable("Unknown regclass!");
  }
}
|
|
|
|
|
2010-09-16 00:36:26 +08:00
|
|
|
/// isLoadFromStackSlot - If MI is a direct, unindexed load of a register
/// from a stack slot, set FrameIndex and return the loaded register; return
/// 0 for anything this pattern matcher does not recognize.
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    // Register-offset forms only qualify when both the offset register and
    // the shift immediate are zero, i.e. the access is exactly [FI].
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
    // Immediate-offset forms qualify only with a zero offset.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64Pseudo:
    // A sub-register load does not count as a full-slot reload.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}
|
|
|
|
|
2011-08-09 05:45:32 +08:00
|
|
|
unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
|
|
|
|
int &FrameIndex) const {
|
|
|
|
const MachineMemOperand *Dummy;
|
2011-12-07 15:15:52 +08:00
|
|
|
return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
|
2011-08-09 05:45:32 +08:00
|
|
|
}
|
|
|
|
|
2011-10-11 08:59:06 +08:00
|
|
|
/// expandPostRAPseudo - Rewrite an S-register COPY into a D-register VMOVD
/// in place, when legal and enabled by the WidenVMOVS flag. Returns true if
/// the instruction was changed. Everything else is left for copyPhysReg().
bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
  if (!WidenVMOVS || !MI->isCopy())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  unsigned DstRegS = MI->getOperand(0).getReg();
  unsigned SrcRegS = MI->getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  // Find the D-registers whose low half (ssub_0) is the S-register pair;
  // odd S-registers have no such super-register and bail out here.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI->getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  DEBUG(dbgs() << "widening: " << *MI);

  // Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
  int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD);
  if (ImpDefIdx != -1)
    MI->RemoveOperand(ImpDefIdx);

  // Change the opcode and operands, then append the default predicate
  // operands that VMOVD requires.
  MI->setDesc(get(ARM::VMOVD));
  MI->getOperand(0).setReg(DstRegD);
  MI->getOperand(1).setReg(SrcRegD);
  AddDefaultPred(MachineInstrBuilder(MI));

  // We are now reading SrcRegD instead of SrcRegS. This may upset the
  // register scavenger and machine verifier, so we need to indicate that we
  // are reading an undefined value from SrcRegD, but a proper value from
  // SrcRegS.
  MI->getOperand(1).setIsUndef();
  MachineInstrBuilder(MI).addReg(SrcRegS, RegState::Implicit);

  // SrcRegD may actually contain an unrelated value in the ssub_1
  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
  if (MI->getOperand(1).isKill()) {
    MI->getOperand(1).setIsKill(false);
    MI->addRegisterKilled(SrcRegS, TRI, true);
  }

  DEBUG(dbgs() << "replaced by: " << *MI);
  return true;
}
|
|
|
|
|
2010-04-26 15:39:25 +08:00
|
|
|
/// emitFrameIndexDebugValue - Build a free-standing DBG_VALUE instruction
/// describing variable MDPtr as living at frame index FrameIx plus Offset.
MachineInstr*
ARMBaseInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
                                           const MDNode *MDPtr,
                                           DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(ARM::DBG_VALUE));
  MIB.addFrameIndex(FrameIx);
  MIB.addImm(0);
  MIB.addImm(Offset);
  MIB.addMetadata(MDPtr);
  return &*MIB;
}
|
|
|
|
|
2010-01-07 07:47:07 +08:00
|
|
|
/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
///
/// The new entry is a clone of the ARM-specific constant pool value at CPI
/// (global value, external symbol, block address, LSDA, or MBB reference)
/// stamped with a freshly allocated PIC label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = 0;
  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
  if (ACPV->isGlobalValue())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
             ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = ARMConstantPoolSymbol::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
             ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId,
                                             ARMCP::CPLSDA, 4);
  else if (ACPV->isMachineBasicBlock())
    NewCPV = ARMConstantPoolMBB::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  // Register the clone in the pool, preserving the original's alignment, and
  // report the new index back through the by-reference CPI parameter.
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}
|
|
|
|
|
2009-11-08 08:15:23 +08:00
|
|
|
/// reMaterialize - Re-issue Orig at the insertion point, defining DestReg
/// (optionally through sub-register SubIdx). Most instructions are simply
/// cloned; PIC constant-pool loads must duplicate their constant-pool entry
/// so the rematerialized copy gets its own PC label.
void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    // Plain clone: copy the instruction and retarget its def to DestReg.
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    // duplicateCPV updates CPI in place to the cloned entry's index and
    // returns the fresh PC label id used as the second operand.
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    // Carry over the original load's memory operands.
    MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }
}
|
|
|
|
|
2010-01-07 07:47:07 +08:00
|
|
|
/// duplicate - Clone Orig into MF and return the clone. For PIC
/// constant-pool loads a constant-pool entry cannot be shared between two
/// instructions, so a duplicated entry with a fresh PC label is created and
/// the original instruction is retargeted to it; the clone keeps the
/// operands it copied.
MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfoImpl::duplicate(Orig, MF);
  unsigned Opc = Orig->getOpcode();
  if (Opc == ARM::tLDRpci_pic || Opc == ARM::t2LDRpci_pic) {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
  }
  return MI;
}
|
|
|
|
|
2010-03-03 09:44:33 +08:00
|
|
|
/// produceSameValue - Return true if MI0 and MI1 are known to produce the
/// same value. Constant-pool loads compare the pool entries they reference;
/// pc-relative global-address materializations compare the globals (PC
/// labels are ignored); PICLDR recurses on the defining instructions of the
/// address registers. Anything else falls back to operand identity,
/// ignoring virtual register defs.
bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1,
                                        const MachineRegisterInfo *MRI) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::MOV_ga_dyn ||
      Opcode == ARM::MOV_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_dyn ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    // Operand 1 is the constant-pool index (loads) or global (MOV_ga forms).
    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::MOV_ga_dyn ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_dyn ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    // Constant-pool loads: compare the referenced pool entries. ARM-specific
    // entries use hasSameValue(); plain entries compare the Constant pointer.
    // A mixed pair (one ARM-specific, one plain) is never equal.
    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
        static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
        static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    // Different address registers may still hold the same value: recurse on
    // the defining instructions (requires MRI and virtual registers).
    unsigned Addr0 = MI0->getOperand(1).getReg();
    unsigned Addr1 = MI1->getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI ||
          !TargetRegisterInfo::isVirtualRegister(Addr0) ||
          !TargetRegisterInfo::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded value, e.g. a constantpool of a global address, are
      // the same.
      if (!produceSameValue(Def0, Def1, MRI))
        return false;
    }

    // Remaining operands (offset, predicate) must match exactly.
    for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
      // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
      const MachineOperand &MO0 = MI0->getOperand(i);
      const MachineOperand &MO1 = MI1->getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
|
|
|
|
|
2010-06-24 07:00:16 +08:00
|
|
|
/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only differences
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  // Both nodes must be one of the recognized simple load opcodes; the
  // operand layout assumed below (base, offset, ..., index, chain) only
  // holds for these.
  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets. Only constant offsets (operand 1) can be
  // reported; anything else means the addresses differ by more than an
  // immediate and the loads don't qualify.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}
|
|
|
|
|
|
|
|
/// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to
|
2011-04-15 13:18:47 +08:00
|
|
|
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
|
2010-06-24 07:00:16 +08:00
|
|
|
/// be scheduled togther. On some targets if two loads are loading from
|
|
|
|
/// addresses in the same cache line, it's better if they are scheduled
|
|
|
|
/// together. This function takes two integers that represent the load offsets
|
|
|
|
/// from the common base address. It returns true if it decides it's desirable
|
|
|
|
/// to schedule the two loads together. "NumLoads" is the number of loads that
|
|
|
|
/// have already been scheduled after Load1.
|
|
|
|
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
|
|
|
|
int64_t Offset1, int64_t Offset2,
|
|
|
|
unsigned NumLoads) const {
|
|
|
|
// Don't worry about Thumb: just ARM and Thumb2.
|
|
|
|
if (Subtarget.isThumb1Only()) return false;
|
|
|
|
|
|
|
|
assert(Offset2 > Offset1);
|
|
|
|
|
|
|
|
if ((Offset2 - Offset1) / 8 > 64)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (Load1->getMachineOpcode() != Load2->getMachineOpcode())
|
|
|
|
return false; // FIXME: overly conservative?
|
|
|
|
|
|
|
|
// Four loads in a row should be sufficient.
|
|
|
|
if (NumLoads >= 3)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-06-19 07:09:54 +08:00
|
|
|
/// isSchedulingBoundary - Return true if the scheduler must not reorder
/// instructions across MI.  Boundaries here are: terminators and labels,
/// the instruction immediately preceding a t2IT (so the whole IT block is
/// scheduled as a unit), and any instruction that writes SP.
bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI->isDebugValue())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isLabel())
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any dbg_value instructions
  while (++I != MBB->end() && I->isDebugValue())
    ;
  // If the next real instruction opens an IT block, MI is a boundary.
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  if (MI->definesRegister(ARM::SP))
    return true;

  return false;
}
|
|
|
|
|
2011-07-10 10:58:07 +08:00
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
isProfitableToIfCvt(MachineBasicBlock &MBB,
|
|
|
|
unsigned NumCycles, unsigned ExtraPredCycles,
|
|
|
|
const BranchProbability &Probability) const {
|
2011-04-13 14:39:16 +08:00
|
|
|
if (!NumCycles)
|
2010-06-26 06:42:03 +08:00
|
|
|
return false;
|
2010-10-05 14:00:33 +08:00
|
|
|
|
2010-09-29 02:32:13 +08:00
|
|
|
// Attempt to estimate the relative costs of predication versus branching.
|
2011-07-10 10:58:07 +08:00
|
|
|
unsigned UnpredCost = Probability.getNumerator() * NumCycles;
|
|
|
|
UnpredCost /= Probability.getDenominator();
|
|
|
|
UnpredCost += 1; // The branch itself
|
|
|
|
UnpredCost += Subtarget.getMispredictionPenalty() / 10;
|
2010-10-05 14:00:33 +08:00
|
|
|
|
2011-07-10 10:58:07 +08:00
|
|
|
return (NumCycles + ExtraPredCycles) <= UnpredCost;
|
2010-06-26 06:42:03 +08:00
|
|
|
}
|
2010-10-05 14:00:33 +08:00
|
|
|
|
2010-06-26 06:42:03 +08:00
|
|
|
bool ARMBaseInstrInfo::
|
2010-11-03 08:45:17 +08:00
|
|
|
isProfitableToIfCvt(MachineBasicBlock &TMBB,
|
|
|
|
unsigned TCycles, unsigned TExtra,
|
|
|
|
MachineBasicBlock &FMBB,
|
|
|
|
unsigned FCycles, unsigned FExtra,
|
2011-07-10 10:58:07 +08:00
|
|
|
const BranchProbability &Probability) const {
|
2010-11-03 08:45:17 +08:00
|
|
|
if (!TCycles || !FCycles)
|
2010-09-29 02:32:13 +08:00
|
|
|
return false;
|
2010-10-05 14:00:33 +08:00
|
|
|
|
2010-09-29 02:32:13 +08:00
|
|
|
// Attempt to estimate the relative costs of predication versus branching.
|
2011-07-10 10:58:07 +08:00
|
|
|
unsigned TUnpredCost = Probability.getNumerator() * TCycles;
|
|
|
|
TUnpredCost /= Probability.getDenominator();
|
2011-09-21 10:17:37 +08:00
|
|
|
|
2011-07-10 10:58:07 +08:00
|
|
|
uint32_t Comp = Probability.getDenominator() - Probability.getNumerator();
|
|
|
|
unsigned FUnpredCost = Comp * FCycles;
|
|
|
|
FUnpredCost /= Probability.getDenominator();
|
|
|
|
|
|
|
|
unsigned UnpredCost = TUnpredCost + FUnpredCost;
|
|
|
|
UnpredCost += 1; // The branch itself
|
|
|
|
UnpredCost += Subtarget.getMispredictionPenalty() / 10;
|
|
|
|
|
|
|
|
return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost;
|
2010-06-26 06:42:03 +08:00
|
|
|
}
|
|
|
|
|
2009-08-08 11:20:32 +08:00
|
|
|
/// getInstrPredicate - If instruction is predicated, returns its predicate
|
|
|
|
/// condition, otherwise returns AL. It also returns the condition code
|
|
|
|
/// register by reference.
|
2009-09-28 17:14:39 +08:00
|
|
|
ARMCC::CondCodes
|
|
|
|
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
|
2009-08-08 11:20:32 +08:00
|
|
|
int PIdx = MI->findFirstPredOperandIdx();
|
|
|
|
if (PIdx == -1) {
|
|
|
|
PredReg = 0;
|
|
|
|
return ARMCC::AL;
|
|
|
|
}
|
|
|
|
|
|
|
|
PredReg = MI->getOperand(PIdx+1).getReg();
|
|
|
|
return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2009-07-28 13:48:47 +08:00
|
|
|
int llvm::getMatchingCondBranchOpcode(int Opc) {
|
2009-07-28 02:20:05 +08:00
|
|
|
if (Opc == ARM::B)
|
|
|
|
return ARM::Bcc;
|
2012-01-21 05:51:11 +08:00
|
|
|
if (Opc == ARM::tB)
|
2009-07-28 02:20:05 +08:00
|
|
|
return ARM::tBcc;
|
2012-01-21 05:51:11 +08:00
|
|
|
if (Opc == ARM::t2B)
|
|
|
|
return ARM::t2Bcc;
|
2009-07-28 02:20:05 +08:00
|
|
|
|
|
|
|
llvm_unreachable("Unknown unconditional branch opcode!");
|
|
|
|
}
|
|
|
|
|
2009-07-28 13:48:47 +08:00
|
|
|
|
2011-09-21 10:20:46 +08:00
|
|
|
/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether the
|
|
|
|
/// instruction is encoded with an 'S' bit is determined by the optional CPSR
|
|
|
|
/// def operand.
|
|
|
|
///
|
|
|
|
/// This will go away once we can teach tblgen how to set the optional CPSR def
|
|
|
|
/// operand itself.
|
|
|
|
struct AddSubFlagsOpcodePair {
|
|
|
|
unsigned PseudoOpc;
|
|
|
|
unsigned MachineOpc;
|
|
|
|
};
|
|
|
|
|
|
|
|
static AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
|
|
|
|
{ARM::ADDSri, ARM::ADDri},
|
|
|
|
{ARM::ADDSrr, ARM::ADDrr},
|
|
|
|
{ARM::ADDSrsi, ARM::ADDrsi},
|
|
|
|
{ARM::ADDSrsr, ARM::ADDrsr},
|
|
|
|
|
|
|
|
{ARM::SUBSri, ARM::SUBri},
|
|
|
|
{ARM::SUBSrr, ARM::SUBrr},
|
|
|
|
{ARM::SUBSrsi, ARM::SUBrsi},
|
|
|
|
{ARM::SUBSrsr, ARM::SUBrsr},
|
|
|
|
|
|
|
|
{ARM::RSBSri, ARM::RSBri},
|
|
|
|
{ARM::RSBSrsi, ARM::RSBrsi},
|
|
|
|
{ARM::RSBSrsr, ARM::RSBrsr},
|
|
|
|
|
|
|
|
{ARM::t2ADDSri, ARM::t2ADDri},
|
|
|
|
{ARM::t2ADDSrr, ARM::t2ADDrr},
|
|
|
|
{ARM::t2ADDSrs, ARM::t2ADDrs},
|
|
|
|
|
|
|
|
{ARM::t2SUBSri, ARM::t2SUBri},
|
|
|
|
{ARM::t2SUBSrr, ARM::t2SUBrr},
|
|
|
|
{ARM::t2SUBSrs, ARM::t2SUBrs},
|
|
|
|
|
|
|
|
{ARM::t2RSBSri, ARM::t2RSBri},
|
|
|
|
{ARM::t2RSBSrs, ARM::t2RSBrs},
|
|
|
|
};
|
|
|
|
|
|
|
|
unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
|
|
|
|
static const int NPairs =
|
|
|
|
sizeof(AddSubFlagsOpcodeMap) / sizeof(AddSubFlagsOpcodePair);
|
|
|
|
for (AddSubFlagsOpcodePair *OpcPair = &AddSubFlagsOpcodeMap[0],
|
|
|
|
*End = &AddSubFlagsOpcodeMap[NPairs]; OpcPair != End; ++OpcPair) {
|
|
|
|
if (OldOpc == OpcPair->PseudoOpc) {
|
|
|
|
return OpcPair->MachineOpc;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-07-28 13:48:47 +08:00
|
|
|
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
|
|
|
|
MachineBasicBlock::iterator &MBBI, DebugLoc dl,
|
|
|
|
unsigned DestReg, unsigned BaseReg, int NumBytes,
|
|
|
|
ARMCC::CondCodes Pred, unsigned PredReg,
|
2011-03-06 02:43:32 +08:00
|
|
|
const ARMBaseInstrInfo &TII, unsigned MIFlags) {
|
2009-07-28 13:48:47 +08:00
|
|
|
bool isSub = NumBytes < 0;
|
|
|
|
if (isSub) NumBytes = -NumBytes;
|
|
|
|
|
|
|
|
while (NumBytes) {
|
|
|
|
unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
|
|
|
|
unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
|
|
|
|
assert(ThisVal && "Didn't extract field correctly");
|
|
|
|
|
|
|
|
// We will handle these bits from offset, clear them.
|
|
|
|
NumBytes &= ~ThisVal;
|
|
|
|
|
|
|
|
assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");
|
|
|
|
|
|
|
|
// Build the new ADD / SUB.
|
|
|
|
unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
|
|
|
|
BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
|
|
|
|
.addReg(BaseReg, RegState::Kill).addImm(ThisVal)
|
2011-03-06 02:43:32 +08:00
|
|
|
.addImm((unsigned)Pred).addReg(PredReg).addReg(0)
|
|
|
|
.setMIFlags(MIFlags);
|
2009-07-28 13:48:47 +08:00
|
|
|
BaseReg = DestReg;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-08-27 09:23:50 +08:00
|
|
|
/// rewriteARMFrameIndex - Replace the frame-index operand at FrameRegIdx of
/// MI with FrameReg, folding as much of Offset into the instruction's
/// immediate field as its addressing mode allows.  On return, Offset holds
/// whatever part could not be folded (signed); returns true iff the entire
/// offset was absorbed.
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      // Negative offset: flip to SUBri and work with the magnitude.
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immedidate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    // Load/store path: decode the addressing mode to find the immediate
    // operand, its bit width, its scale, and its current value.
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12: {
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold address comp. if opcode has offset bits
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        // FIXME: When addrmode2 goes away, this will simplify (like the
        // T2 version), as the LDR.i12 versions don't need the encoding
        // tricks for the offset value.
        if (isSub) {
          if (AddrMode == ARMII::AddrMode_i12)
            ImmedOffset = -ImmedOffset;
          else
            // Legacy addrmodes encode the sub direction as an extra bit
            // above the magnitude field.
            ImmedOffset |= 1 << NumBits;
        }
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub) {
        if (AddrMode == ARMII::AddrMode_i12)
          ImmedOffset = -ImmedOffset;
        else
          ImmedOffset |= 1 << NumBits;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  // Report the leftover (signed) offset; fully folded iff it is zero.
  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}
|
2010-08-06 09:32:48 +08:00
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::
|
2010-09-28 12:18:29 +08:00
|
|
|
AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg, int &CmpMask,
|
|
|
|
int &CmpValue) const {
|
2010-08-06 09:32:48 +08:00
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default: break;
|
2010-08-11 08:23:00 +08:00
|
|
|
case ARM::CMPri:
|
2010-08-06 09:32:48 +08:00
|
|
|
case ARM::t2CMPri:
|
|
|
|
SrcReg = MI->getOperand(0).getReg();
|
2010-09-21 20:01:15 +08:00
|
|
|
CmpMask = ~0;
|
2010-08-06 09:32:48 +08:00
|
|
|
CmpValue = MI->getOperand(1).getImm();
|
|
|
|
return true;
|
2010-09-21 20:01:15 +08:00
|
|
|
case ARM::TSTri:
|
|
|
|
case ARM::t2TSTri:
|
|
|
|
SrcReg = MI->getOperand(0).getReg();
|
|
|
|
CmpMask = MI->getOperand(1).getImm();
|
|
|
|
CmpValue = 0;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-09-29 18:12:08 +08:00
|
|
|
/// isSuitableForMask - Identify a suitable 'and' instruction that
/// operates on the given source register and applies the same mask
/// as a 'tst' instruction. Provide a limited look-through for copies.
/// When successful, MI will hold the found instruction.
/// NOTE: MI is an in/out reference — the COPY case advances it to the
/// instruction that follows the copy before recursing.
static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
                              int CmpMask, bool CommonUse) {
  switch (MI->getOpcode()) {
    case ARM::ANDri:
    case ARM::t2ANDri:
      // The AND must apply exactly the mask the TST uses.
      if (CmpMask != MI->getOperand(2).getImm())
        return false;
      // CommonUse selects whether SrcReg is the AND's input (operand 1) or
      // its result (operand 0).
      if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
        return true;
      break;
    case ARM::COPY: {
      // Walk down one instruction which is potentially an 'and'.
      const MachineInstr &Copy = *MI;
      MachineBasicBlock::iterator AND(
        llvm::next(MachineBasicBlock::iterator(MI)));
      if (AND == MI->getParent()->end()) return false;
      MI = AND;
      return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
                               CmpMask, true);
    }
  }

  return false;
}
|
|
|
|
|
2010-09-11 08:13:50 +08:00
|
|
|
/// OptimizeCompareInstr - Convert the instruction supplying the argument to the
/// comparison into one that sets the zero bit in the flags register.
/// Only compares against zero are handled.  The defining instruction is
/// converted to its flag-setting form (its optional CPSR operand is made a
/// def) and the compare is erased, provided CPSR is not read or written
/// between the two and no later reader depends on the V bit.
bool ARMBaseInstrInfo::
OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, int CmpMask,
                     int CmpValue, const MachineRegisterInfo *MRI) const {
  // Only a compare against zero can be replaced by an S-suffixed op.
  if (CmpValue != 0)
    return false;

  MachineRegisterInfo::def_iterator DI = MRI->def_begin(SrcReg);
  if (llvm::next(DI) != MRI->def_end())
    // Only support one definition.
    return false;

  MachineInstr *MI = &*DI;

  // Masked compares sometimes use the same register as the corresponding 'and'.
  if (CmpMask != ~0) {
    if (!isSuitableForMask(MI, SrcReg, CmpMask, false)) {
      // The def itself is not the AND; scan the uses in this block for an
      // AND applying the same mask.
      MI = 0;
      for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
           UE = MRI->use_end(); UI != UE; ++UI) {
        if (UI->getParent() != CmpInstr->getParent()) continue;
        MachineInstr *PotentialAND = &*UI;
        if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true))
          continue;
        MI = PotentialAND;
        break;
      }
      if (!MI) return false;
    }
  }

  // Conservatively refuse to convert an instruction which isn't in the same BB
  // as the comparison.
  if (MI->getParent() != CmpInstr->getParent())
    return false;

  // Check that CPSR isn't set between the comparison instruction and the one we
  // want to change.
  MachineBasicBlock::iterator I = CmpInstr,E = MI, B = MI->getParent()->begin();

  // Early exit if CmpInstr is at the beginning of the BB.
  if (I == B) return false;

  // Walk backwards from the compare to the defining instruction, rejecting
  // any intervening def or use of CPSR (a use would observe flags set by an
  // earlier instruction that our conversion would clobber).
  --I;
  for (; I != E; --I) {
    const MachineInstr &Instr = *I;

    for (unsigned IO = 0, EO = Instr.getNumOperands(); IO != EO; ++IO) {
      const MachineOperand &MO = Instr.getOperand(IO);
      // A call's register mask clobbering CPSR also blocks the transform.
      if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR))
        return false;
      if (!MO.isReg()) continue;

      // This instruction modifies or uses CPSR after the one we want to
      // change. We can't do this transformation.
      if (MO.getReg() == ARM::CPSR)
        return false;
    }

    if (I == B)
      // The 'and' is below the comparison instruction.
      return false;
  }

  // Set the "zero" bit in CPSR.
  switch (MI->getOpcode()) {
  default: break;
  case ARM::RSBrr:
  case ARM::RSBri:
  case ARM::RSCrr:
  case ARM::RSCri:
  case ARM::ADDrr:
  case ARM::ADDri:
  case ARM::ADCrr:
  case ARM::ADCri:
  case ARM::SUBrr:
  case ARM::SUBri:
  case ARM::SBCrr:
  case ARM::SBCri:
  case ARM::t2RSBri:
  case ARM::t2ADDrr:
  case ARM::t2ADDri:
  case ARM::t2ADCrr:
  case ARM::t2ADCri:
  case ARM::t2SUBrr:
  case ARM::t2SUBri:
  case ARM::t2SBCrr:
  case ARM::t2SBCri:
  case ARM::ANDrr:
  case ARM::ANDri:
  case ARM::t2ANDrr:
  case ARM::t2ANDri:
  case ARM::ORRrr:
  case ARM::ORRri:
  case ARM::t2ORRrr:
  case ARM::t2ORRri:
  case ARM::EORrr:
  case ARM::EORri:
  case ARM::t2EORrr:
  case ARM::t2EORri: {
    // Scan forward for the use of CPSR, if it's a conditional code requires
    // checking of V bit, then this is not safe to do. If we can't find the
    // CPSR use (i.e. used in another block), then it's not safe to perform
    // the optimization.
    bool isSafe = false;
    I = CmpInstr;
    E = MI->getParent()->end();
    while (!isSafe && ++I != E) {
      const MachineInstr &Instr = *I;
      for (unsigned IO = 0, EO = Instr.getNumOperands();
           !isSafe && IO != EO; ++IO) {
        const MachineOperand &MO = Instr.getOperand(IO);
        // A clobbering regmask ends CPSR's live range: safe to convert.
        if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
          isSafe = true;
          break;
        }
        if (!MO.isReg() || MO.getReg() != ARM::CPSR)
          continue;
        // A redefinition of CPSR also ends its live range.
        if (MO.isDef()) {
          isSafe = true;
          break;
        }
        // Condition code is after the operand before CPSR.
        ARMCC::CondCodes CC = (ARMCC::CondCodes)Instr.getOperand(IO-1).getImm();
        switch (CC) {
        default:
          // Conditions that don't read the V bit are fine.
          isSafe = true;
          break;
        case ARMCC::VS:
        case ARMCC::VC:
        case ARMCC::GE:
        case ARMCC::LT:
        case ARMCC::GT:
        case ARMCC::LE:
          // These condition codes read V, which the S-form may set
          // differently than the compare would; bail out.
          return false;
        }
      }
    }

    if (!isSafe)
      return false;

    // Toggle the optional operand to CPSR.
    MI->getOperand(5).setReg(ARM::CPSR);
    MI->getOperand(5).setIsDef(true);
    CmpInstr->eraseFromParent();
    return true;
  }
  }

  return false;
}
|
2010-09-10 02:18:55 +08:00
|
|
|
|
2010-11-18 04:13:28 +08:00
|
|
|
/// FoldImmediate - Try to fold the 32-bit immediate materialized by DefMI
/// (a MOVi32imm / t2MOVi32imm defining Reg) directly into its single use
/// UseMI, rewriting UseMI into a two-instruction immediate form and erasing
/// DefMI.  Returns true on success.
bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
                                     MachineInstr *DefMI, unsigned Reg,
                                     MachineRegisterInfo *MRI) const {
  // Fold large immediates into add, sub, or, xor.
  unsigned DefOpc = DefMI->getOpcode();
  if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
    return false;
  if (!DefMI->getOperand(1).isImm())
    // Could be t2MOVi32imm <ga:xx>
    return false;

  // The materialized value must have exactly one (non-debug) use, or
  // erasing DefMI would break other users.
  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  unsigned UseOpc = UseMI->getOpcode();
  unsigned NewUseOpc = 0;
  uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
  uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
  bool Commute = false;
  switch (UseOpc) {
  default: return false;
  case ARM::SUBrr:
  case ARM::ADDrr:
  case ARM::ORRrr:
  case ARM::EORrr:
  case ARM::t2SUBrr:
  case ARM::t2ADDrr:
  case ARM::t2ORRrr:
  case ARM::t2EORrr: {
    // If Reg is operand 1 rather than 2, the operands must be swapped
    // (which is only legal for the commutative opcodes, not SUB).
    Commute = UseMI->getOperand(2).getReg() != Reg;
    switch (UseOpc) {
    default: break;
    case ARM::SUBrr: {
      if (Commute)
        return false;
      // Rewrite a - imm as a + (-imm) so the two-part split below applies.
      ImmVal = -ImmVal;
      NewUseOpc = ARM::SUBri;
      // Fallthrough
    }
    case ARM::ADDrr:
    case ARM::ORRrr:
    case ARM::EORrr: {
      // The immediate must split into two so_imm-encodable parts.
      if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::ADDrr: NewUseOpc = ARM::ADDri; break;
      case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
      case ARM::EORrr: NewUseOpc = ARM::EORri; break;
      }
      break;
    }
    case ARM::t2SUBrr: {
      if (Commute)
        return false;
      ImmVal = -ImmVal;
      NewUseOpc = ARM::t2SUBri;
      // Fallthrough
    }
    case ARM::t2ADDrr:
    case ARM::t2ORRrr:
    case ARM::t2EORrr: {
      // Thumb2 uses its own two-part immediate encoding check.
      if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
        return false;
      SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
      SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
      switch (UseOpc) {
      default: break;
      case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break;
      case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
      case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
      }
      break;
    }
    }
  }
  }

  // Emit the first half: NewReg = Reg1 op SOImmValV1, inserted before UseMI.
  unsigned OpIdx = Commute ? 2 : 1;
  unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
  bool isKill = UseMI->getOperand(OpIdx).isKill();
  unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
  AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
                                      UseMI, UseMI->getDebugLoc(),
                                      get(NewUseOpc), NewReg)
                              .addReg(Reg1, getKillRegState(isKill))
                              .addImm(SOImmValV1)));
  // Rewrite UseMI in place as the second half: NewReg op SOImmValV2.
  UseMI->setDesc(get(NewUseOpc));
  UseMI->getOperand(1).setReg(NewReg);
  UseMI->getOperand(1).setIsKill();
  UseMI->getOperand(2).ChangeToImmediate(SOImmValV2);
  DefMI->eraseFromParent();
  return true;
}
|
|
|
|
|
2010-09-10 02:18:55 +08:00
|
|
|
// Return the number of micro-ops this instruction decodes into. Normally the
// answer comes straight from the scheduling itinerary; load / store multiple
// instructions are variable_ops, so their uop count must be computed from the
// actual register-list length of this particular MachineInstr.
unsigned
ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                 const MachineInstr *MI) const {
  // Without itinerary data there is nothing to consult; assume one uop.
  if (!ItinData || ItinData->isEmpty())
    return 1;

  const MCInstrDesc &Desc = MI->getDesc();
  unsigned Class = Desc.getSchedClass();
  unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
  // A non-zero count in the itinerary is authoritative.
  if (UOps)
    return UOps;

  // Itinerary says 0 uops: this must be one of the variable-width
  // (register-list based) instructions handled below.
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default:
    llvm_unreachable("Unexpected multi-uops instruction!");
  case ARM::VLDMQIA:
  case ARM::VSTMQIA:
    // A Q register load/store is exactly a D-register pair.
    return 2;

  // The number of uOps for load / store multiple are determined by the number
  // of registers.
  //
  // On Cortex-A8, each pair of register loads / stores can be scheduled on the
  // same cycle. The scheduling for the first load / store must be done
  // separately by assuming the address is not 64-bit aligned.
  //
  // On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
  // is not 64-bit aligned, then AGU would take an extra cycle. For VFP / NEON
  // load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD: {
    // Register-list operands are the ones beyond the fixed operands of the
    // descriptor.
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
    return (NumRegs / 2) + (NumRegs % 2) + 1;
  }

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    // +1: integer LDM/STM forms have one fewer fixed operand than the
    // count implied by the descriptor for this computation.
    unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
    if (Subtarget.isCortexA8()) {
      // Fewer than 4 registers still issue as two uops on A8.
      if (NumRegs < 4)
        return 2;
      // 4 registers would be issued: 2, 2.
      // 5 registers would be issued: 2, 2, 1.
      UOps = (NumRegs / 2);
      if (NumRegs % 2)
        ++UOps;
      return UOps;
    } else if (Subtarget.isCortexA9()) {
      UOps = (NumRegs / 2);
      // If there are odd number of registers or if it's not 64-bit aligned,
      // then it takes an extra AGU (Address Generation Unit) cycle.
      if ((NumRegs % 2) ||
          !MI->hasOneMemOperand() ||
          (*MI->memoperands_begin())->getAlignment() < 8)
        ++UOps;
      return UOps;
    } else {
      // Assume the worst.
      return NumRegs;
    }
  }
  }
}
|
2010-10-06 14:27:31 +08:00
|
|
|
|
2010-10-08 07:12:15 +08:00
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &DefMCID,
|
2010-10-08 07:12:15 +08:00
|
|
|
unsigned DefClass,
|
|
|
|
unsigned DefIdx, unsigned DefAlign) const {
|
2011-06-29 03:10:37 +08:00
|
|
|
int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
|
2010-10-08 07:12:15 +08:00
|
|
|
if (RegNo <= 0)
|
|
|
|
// Def is the address writeback.
|
|
|
|
return ItinData->getOperandCycle(DefClass, DefIdx);
|
|
|
|
|
|
|
|
int DefCycle;
|
|
|
|
if (Subtarget.isCortexA8()) {
|
|
|
|
// (regno / 2) + (regno % 2) + 1
|
|
|
|
DefCycle = RegNo / 2 + 1;
|
|
|
|
if (RegNo % 2)
|
|
|
|
++DefCycle;
|
|
|
|
} else if (Subtarget.isCortexA9()) {
|
|
|
|
DefCycle = RegNo;
|
|
|
|
bool isSLoad = false;
|
2010-11-16 09:16:36 +08:00
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
switch (DefMCID.getOpcode()) {
|
2010-10-08 07:12:15 +08:00
|
|
|
default: break;
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::VLDMSIA:
|
|
|
|
case ARM::VLDMSIA_UPD:
|
|
|
|
case ARM::VLDMSDB_UPD:
|
2010-10-08 07:12:15 +08:00
|
|
|
isSLoad = true;
|
|
|
|
break;
|
|
|
|
}
|
2010-11-16 09:16:36 +08:00
|
|
|
|
2010-10-08 07:12:15 +08:00
|
|
|
// If there are odd number of 'S' registers or if it's not 64-bit aligned,
|
|
|
|
// then it takes an extra cycle.
|
|
|
|
if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
|
|
|
|
++DefCycle;
|
|
|
|
} else {
|
|
|
|
// Assume the worst.
|
|
|
|
DefCycle = RegNo + 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return DefCycle;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &DefMCID,
|
2010-10-08 07:12:15 +08:00
|
|
|
unsigned DefClass,
|
|
|
|
unsigned DefIdx, unsigned DefAlign) const {
|
2011-06-29 03:10:37 +08:00
|
|
|
int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
|
2010-10-08 07:12:15 +08:00
|
|
|
if (RegNo <= 0)
|
|
|
|
// Def is the address writeback.
|
|
|
|
return ItinData->getOperandCycle(DefClass, DefIdx);
|
|
|
|
|
|
|
|
int DefCycle;
|
|
|
|
if (Subtarget.isCortexA8()) {
|
|
|
|
// 4 registers would be issued: 1, 2, 1.
|
|
|
|
// 5 registers would be issued: 1, 2, 2.
|
|
|
|
DefCycle = RegNo / 2;
|
|
|
|
if (DefCycle < 1)
|
|
|
|
DefCycle = 1;
|
|
|
|
// Result latency is issue cycle + 2: E2.
|
|
|
|
DefCycle += 2;
|
|
|
|
} else if (Subtarget.isCortexA9()) {
|
|
|
|
DefCycle = (RegNo / 2);
|
|
|
|
// If there are odd number of registers or if it's not 64-bit aligned,
|
|
|
|
// then it takes an extra AGU (Address Generation Unit) cycle.
|
|
|
|
if ((RegNo % 2) || DefAlign < 8)
|
|
|
|
++DefCycle;
|
|
|
|
// Result latency is AGU cycles + 2.
|
|
|
|
DefCycle += 2;
|
|
|
|
} else {
|
|
|
|
// Assume the worst.
|
|
|
|
DefCycle = RegNo + 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return DefCycle;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &UseMCID,
|
2010-10-08 07:12:15 +08:00
|
|
|
unsigned UseClass,
|
|
|
|
unsigned UseIdx, unsigned UseAlign) const {
|
2011-06-29 03:10:37 +08:00
|
|
|
int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
|
2010-10-08 07:12:15 +08:00
|
|
|
if (RegNo <= 0)
|
|
|
|
return ItinData->getOperandCycle(UseClass, UseIdx);
|
|
|
|
|
|
|
|
int UseCycle;
|
|
|
|
if (Subtarget.isCortexA8()) {
|
|
|
|
// (regno / 2) + (regno % 2) + 1
|
|
|
|
UseCycle = RegNo / 2 + 1;
|
|
|
|
if (RegNo % 2)
|
|
|
|
++UseCycle;
|
|
|
|
} else if (Subtarget.isCortexA9()) {
|
|
|
|
UseCycle = RegNo;
|
|
|
|
bool isSStore = false;
|
2010-11-16 09:16:36 +08:00
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
switch (UseMCID.getOpcode()) {
|
2010-10-08 07:12:15 +08:00
|
|
|
default: break;
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::VSTMSIA:
|
|
|
|
case ARM::VSTMSIA_UPD:
|
|
|
|
case ARM::VSTMSDB_UPD:
|
2010-10-08 07:12:15 +08:00
|
|
|
isSStore = true;
|
|
|
|
break;
|
|
|
|
}
|
2010-11-16 09:16:36 +08:00
|
|
|
|
2010-10-08 07:12:15 +08:00
|
|
|
// If there are odd number of 'S' registers or if it's not 64-bit aligned,
|
|
|
|
// then it takes an extra cycle.
|
|
|
|
if ((isSStore && (RegNo % 2)) || UseAlign < 8)
|
|
|
|
++UseCycle;
|
|
|
|
} else {
|
|
|
|
// Assume the worst.
|
|
|
|
UseCycle = RegNo + 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return UseCycle;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &UseMCID,
|
2010-10-08 07:12:15 +08:00
|
|
|
unsigned UseClass,
|
|
|
|
unsigned UseIdx, unsigned UseAlign) const {
|
2011-06-29 03:10:37 +08:00
|
|
|
int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
|
2010-10-08 07:12:15 +08:00
|
|
|
if (RegNo <= 0)
|
|
|
|
return ItinData->getOperandCycle(UseClass, UseIdx);
|
|
|
|
|
|
|
|
int UseCycle;
|
|
|
|
if (Subtarget.isCortexA8()) {
|
|
|
|
UseCycle = RegNo / 2;
|
|
|
|
if (UseCycle < 2)
|
|
|
|
UseCycle = 2;
|
|
|
|
// Read in E3.
|
|
|
|
UseCycle += 2;
|
|
|
|
} else if (Subtarget.isCortexA9()) {
|
|
|
|
UseCycle = (RegNo / 2);
|
|
|
|
// If there are odd number of registers or if it's not 64-bit aligned,
|
|
|
|
// then it takes an extra AGU (Address Generation Unit) cycle.
|
|
|
|
if ((RegNo % 2) || UseAlign < 8)
|
|
|
|
++UseCycle;
|
|
|
|
} else {
|
|
|
|
// Assume the worst.
|
|
|
|
UseCycle = 1;
|
|
|
|
}
|
|
|
|
return UseCycle;
|
|
|
|
}
|
|
|
|
|
2010-10-06 14:27:31 +08:00
|
|
|
// Compute the latency (in cycles) from the value defined by operand DefIdx
// of an instruction described by DefMCID to its read by operand UseIdx of an
// instruction described by UseMCID. DefAlign / UseAlign are the known
// memory-operand alignments (0 if unknown). Handles the variable_ops
// load/store-multiple opcodes whose operand latencies cannot come directly
// from the itinerary tables.
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    const MCInstrDesc &DefMCID,
                                    unsigned DefIdx, unsigned DefAlign,
                                    const MCInstrDesc &UseMCID,
                                    unsigned UseIdx, unsigned UseAlign) const {
  unsigned DefClass = DefMCID.getSchedClass();
  unsigned UseClass = UseMCID.getSchedClass();

  // Both operand indices are within the fixed part of the descriptors, so
  // the itinerary tables answer directly.
  if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
    return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);

  // This may be a def / use of a variable_ops instruction, the operand
  // latency might be determinable dynamically. Let the target try to
  // figure it out.
  int DefCycle = -1;
  bool LdmBypass = false;
  switch (DefMCID.getOpcode()) {
  default:
    DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
    break;

  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VLDMSDB_UPD:
    // VFP load multiple: def cycle depends on position in the register list.
    DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;

  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::LDMIA_UPD:
  case ARM::LDMDA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tPUSH:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
    // Integer load multiple may benefit from the LDM result-forwarding
    // bypass; remember that for the forwarding check below.
    LdmBypass = 1;
    DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
    break;
  }

  if (DefCycle == -1)
    // We can't seem to determine the result latency of the def, assume it's 2.
    DefCycle = 2;

  int UseCycle = -1;
  switch (UseMCID.getOpcode()) {
  default:
    UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
    break;

  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
  case ARM::VSTMDDB_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VSTMSDB_UPD:
    // VFP store multiple: use cycle depends on position in the register list.
    UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;

  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::STMIA_UPD:
  case ARM::STMDA_UPD:
  case ARM::STMDB_UPD:
  case ARM::STMIB_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::tPOP_RET:
  case ARM::tPOP:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
    break;
  }

  if (UseCycle == -1)
    // Assume it's read in the first stage.
    UseCycle = 1;

  // Latency = cycle the def is ready minus cycle the use reads it, plus one.
  UseCycle = DefCycle - UseCycle + 1;
  if (UseCycle > 0) {
    // A pipeline-forwarding path between the two scheduling classes saves a
    // cycle.
    if (LdmBypass) {
      // It's a variable_ops instruction so we can't use DefIdx here. Just use
      // first def operand.
      if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
                                          UseClass, UseIdx))
        --UseCycle;
    } else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
                                               UseClass, UseIdx)) {
      --UseCycle;
    }
  }

  return UseCycle;
}
|
|
|
|
|
2011-12-14 10:11:42 +08:00
|
|
|
// Walk backwards through the bundle headed by MI to find the instruction
// that defines Reg. On return, DefIdx is the defining operand's index within
// that instruction and Dist is the number of instructions between it and the
// end of the bundle. Asserts if no definition is found.
static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI, unsigned Reg,
                                           unsigned &DefIdx, unsigned &Dist) {
  Dist = 0;

  // Position Scan on the last instruction inside the bundle.
  MachineBasicBlock::const_iterator Next = MI; ++Next;
  MachineBasicBlock::const_instr_iterator Scan =
    llvm::prior(Next.getInstrIterator());
  assert(Scan->isInsideBundle() && "Empty bundle?");

  // Scan backwards for a def of Reg, counting the distance as we go.
  int OpIdx = -1;
  for (; Scan->isInsideBundle(); --Scan, ++Dist) {
    OpIdx = Scan->findRegisterDefOperandIdx(Reg, false, true, TRI);
    if (OpIdx != -1)
      break;
  }

  assert(OpIdx != -1 && "Cannot find bundled definition!");
  DefIdx = OpIdx;
  return Scan;
}
|
|
|
|
|
|
|
|
// Walk forward through the bundle headed by MI to find the first instruction
// that uses Reg. On success, UseIdx is the using operand's index and Dist the
// number of (non-IT) instructions skipped; returns 0 (and resets Dist) if no
// use of Reg is found in the bundle.
static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
                                           const MachineInstr *MI, unsigned Reg,
                                           unsigned &UseIdx, unsigned &Dist) {
  Dist = 0;

  // Position Scan on the first instruction inside the bundle.
  MachineBasicBlock::const_instr_iterator Scan = MI; ++Scan;
  assert(Scan->isInsideBundle() && "Empty bundle?");
  MachineBasicBlock::const_instr_iterator End = MI->getParent()->instr_end();

  // FIXME: This doesn't properly handle multiple uses.
  int OpIdx = -1;
  for (; Scan != End && Scan->isInsideBundle(); ++Scan) {
    OpIdx = Scan->findRegisterUseOperandIdx(Reg, false, TRI);
    if (OpIdx != -1)
      break;
    // t2IT instructions do not count toward the distance.
    if (Scan->getOpcode() != ARM::t2IT)
      ++Dist;
  }

  if (OpIdx == -1) {
    Dist = 0;
    return 0;
  }

  UseIdx = OpIdx;
  return Scan;
}
|
|
|
|
|
2010-10-06 14:27:31 +08:00
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr *DefMI, unsigned DefIdx,
|
|
|
|
const MachineInstr *UseMI, unsigned UseIdx) const {
|
|
|
|
if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
|
|
|
|
DefMI->isRegSequence() || DefMI->isImplicitDef())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!ItinData || ItinData->isEmpty())
|
2011-12-07 15:15:52 +08:00
|
|
|
return DefMI->mayLoad() ? 3 : 1;
|
2010-10-06 14:27:31 +08:00
|
|
|
|
2011-12-14 10:11:42 +08:00
|
|
|
const MCInstrDesc *DefMCID = &DefMI->getDesc();
|
|
|
|
const MCInstrDesc *UseMCID = &UseMI->getDesc();
|
2010-10-23 10:04:38 +08:00
|
|
|
const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
|
2011-12-15 04:00:08 +08:00
|
|
|
unsigned Reg = DefMO.getReg();
|
|
|
|
if (Reg == ARM::CPSR) {
|
2010-10-30 07:16:55 +08:00
|
|
|
if (DefMI->getOpcode() == ARM::FMSTAT) {
|
|
|
|
// fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
|
|
|
|
return Subtarget.isCortexA9() ? 1 : 20;
|
|
|
|
}
|
|
|
|
|
2010-10-23 10:04:38 +08:00
|
|
|
// CPSR set and branch can be paired in the same cycle.
|
2011-12-07 15:15:52 +08:00
|
|
|
if (UseMI->isBranch())
|
2010-10-30 07:16:55 +08:00
|
|
|
return 0;
|
2011-12-14 10:11:42 +08:00
|
|
|
|
|
|
|
// Otherwise it takes the instruction latency (generally one).
|
|
|
|
int Latency = getInstrLatency(ItinData, DefMI);
|
2011-12-15 04:00:08 +08:00
|
|
|
|
|
|
|
// For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to
|
|
|
|
// its uses. Instructions which are otherwise scheduled between them may
|
|
|
|
// incur a code size penalty (not able to use the CPSR setting 16-bit
|
|
|
|
// instructions).
|
|
|
|
if (Latency > 0 && Subtarget.isThumb2()) {
|
|
|
|
const MachineFunction *MF = DefMI->getParent()->getParent();
|
|
|
|
if (MF->getFunction()->hasFnAttr(Attribute::OptimizeForSize))
|
|
|
|
--Latency;
|
|
|
|
}
|
2011-12-14 10:11:42 +08:00
|
|
|
return Latency;
|
2010-10-30 07:16:55 +08:00
|
|
|
}
|
2010-10-23 10:04:38 +08:00
|
|
|
|
2010-10-06 14:27:31 +08:00
|
|
|
unsigned DefAlign = DefMI->hasOneMemOperand()
|
|
|
|
? (*DefMI->memoperands_begin())->getAlignment() : 0;
|
|
|
|
unsigned UseAlign = UseMI->hasOneMemOperand()
|
|
|
|
? (*UseMI->memoperands_begin())->getAlignment() : 0;
|
2011-12-14 10:11:42 +08:00
|
|
|
|
|
|
|
unsigned DefAdj = 0;
|
|
|
|
if (DefMI->isBundle()) {
|
2011-12-15 04:00:08 +08:00
|
|
|
DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
|
2011-12-14 10:11:42 +08:00
|
|
|
if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
|
|
|
|
DefMI->isRegSequence() || DefMI->isImplicitDef())
|
|
|
|
return 1;
|
|
|
|
DefMCID = &DefMI->getDesc();
|
|
|
|
}
|
|
|
|
unsigned UseAdj = 0;
|
|
|
|
if (UseMI->isBundle()) {
|
2011-12-15 04:00:08 +08:00
|
|
|
unsigned NewUseIdx;
|
|
|
|
const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
|
|
|
|
Reg, NewUseIdx, UseAdj);
|
|
|
|
if (NewUseMI) {
|
|
|
|
UseMI = NewUseMI;
|
|
|
|
UseIdx = NewUseIdx;
|
|
|
|
UseMCID = &UseMI->getDesc();
|
|
|
|
}
|
2011-12-14 10:11:42 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
|
|
|
|
*UseMCID, UseIdx, UseAlign);
|
|
|
|
int Adj = DefAdj + UseAdj;
|
|
|
|
if (Adj) {
|
|
|
|
Latency -= (int)(DefAdj + UseAdj);
|
|
|
|
if (Latency < 1)
|
|
|
|
return 1;
|
|
|
|
}
|
2010-10-28 14:47:08 +08:00
|
|
|
|
|
|
|
if (Latency > 1 &&
|
|
|
|
(Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
|
|
|
|
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
|
|
|
|
// variants are one cycle cheaper.
|
2011-12-14 10:11:42 +08:00
|
|
|
switch (DefMCID->getOpcode()) {
|
2010-10-28 14:47:08 +08:00
|
|
|
default: break;
|
|
|
|
case ARM::LDRrs:
|
|
|
|
case ARM::LDRBrs: {
|
|
|
|
unsigned ShOpVal = DefMI->getOperand(3).getImm();
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (ShImm == 0 ||
|
|
|
|
(ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
|
|
|
|
--Latency;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case ARM::t2LDRs:
|
|
|
|
case ARM::t2LDRBs:
|
|
|
|
case ARM::t2LDRHs:
|
|
|
|
case ARM::t2LDRSHs: {
|
|
|
|
// Thumb2 mode: lsl only.
|
|
|
|
unsigned ShAmt = DefMI->getOperand(3).getImm();
|
|
|
|
if (ShAmt == 0 || ShAmt == 2)
|
|
|
|
--Latency;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-04-19 09:21:49 +08:00
|
|
|
if (DefAlign < 8 && Subtarget.isCortexA9())
|
2011-12-14 10:11:42 +08:00
|
|
|
switch (DefMCID->getOpcode()) {
|
2011-04-19 09:21:49 +08:00
|
|
|
default: break;
|
|
|
|
case ARM::VLD1q8:
|
|
|
|
case ARM::VLD1q16:
|
|
|
|
case ARM::VLD1q32:
|
|
|
|
case ARM::VLD1q64:
|
2011-10-25 05:45:13 +08:00
|
|
|
case ARM::VLD1q8wb_fixed:
|
|
|
|
case ARM::VLD1q16wb_fixed:
|
|
|
|
case ARM::VLD1q32wb_fixed:
|
|
|
|
case ARM::VLD1q64wb_fixed:
|
|
|
|
case ARM::VLD1q8wb_register:
|
|
|
|
case ARM::VLD1q16wb_register:
|
|
|
|
case ARM::VLD1q32wb_register:
|
|
|
|
case ARM::VLD1q64wb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD2d8:
|
|
|
|
case ARM::VLD2d16:
|
|
|
|
case ARM::VLD2d32:
|
|
|
|
case ARM::VLD2q8:
|
|
|
|
case ARM::VLD2q16:
|
|
|
|
case ARM::VLD2q32:
|
2011-12-10 05:28:25 +08:00
|
|
|
case ARM::VLD2d8wb_fixed:
|
|
|
|
case ARM::VLD2d16wb_fixed:
|
|
|
|
case ARM::VLD2d32wb_fixed:
|
|
|
|
case ARM::VLD2q8wb_fixed:
|
|
|
|
case ARM::VLD2q16wb_fixed:
|
|
|
|
case ARM::VLD2q32wb_fixed:
|
|
|
|
case ARM::VLD2d8wb_register:
|
|
|
|
case ARM::VLD2d16wb_register:
|
|
|
|
case ARM::VLD2d32wb_register:
|
|
|
|
case ARM::VLD2q8wb_register:
|
|
|
|
case ARM::VLD2q16wb_register:
|
|
|
|
case ARM::VLD2q32wb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD3d8:
|
|
|
|
case ARM::VLD3d16:
|
|
|
|
case ARM::VLD3d32:
|
|
|
|
case ARM::VLD1d64T:
|
|
|
|
case ARM::VLD3d8_UPD:
|
|
|
|
case ARM::VLD3d16_UPD:
|
|
|
|
case ARM::VLD3d32_UPD:
|
2011-10-25 07:26:05 +08:00
|
|
|
case ARM::VLD1d64Twb_fixed:
|
|
|
|
case ARM::VLD1d64Twb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD3q8_UPD:
|
|
|
|
case ARM::VLD3q16_UPD:
|
|
|
|
case ARM::VLD3q32_UPD:
|
|
|
|
case ARM::VLD4d8:
|
|
|
|
case ARM::VLD4d16:
|
|
|
|
case ARM::VLD4d32:
|
|
|
|
case ARM::VLD1d64Q:
|
|
|
|
case ARM::VLD4d8_UPD:
|
|
|
|
case ARM::VLD4d16_UPD:
|
|
|
|
case ARM::VLD4d32_UPD:
|
2011-10-25 08:14:01 +08:00
|
|
|
case ARM::VLD1d64Qwb_fixed:
|
|
|
|
case ARM::VLD1d64Qwb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD4q8_UPD:
|
|
|
|
case ARM::VLD4q16_UPD:
|
|
|
|
case ARM::VLD4q32_UPD:
|
|
|
|
case ARM::VLD1DUPq8:
|
|
|
|
case ARM::VLD1DUPq16:
|
|
|
|
case ARM::VLD1DUPq32:
|
2011-12-01 03:35:44 +08:00
|
|
|
case ARM::VLD1DUPq8wb_fixed:
|
|
|
|
case ARM::VLD1DUPq16wb_fixed:
|
|
|
|
case ARM::VLD1DUPq32wb_fixed:
|
|
|
|
case ARM::VLD1DUPq8wb_register:
|
|
|
|
case ARM::VLD1DUPq16wb_register:
|
|
|
|
case ARM::VLD1DUPq32wb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD2DUPd8:
|
|
|
|
case ARM::VLD2DUPd16:
|
|
|
|
case ARM::VLD2DUPd32:
|
2011-12-22 03:40:55 +08:00
|
|
|
case ARM::VLD2DUPd8wb_fixed:
|
|
|
|
case ARM::VLD2DUPd16wb_fixed:
|
|
|
|
case ARM::VLD2DUPd32wb_fixed:
|
|
|
|
case ARM::VLD2DUPd8wb_register:
|
|
|
|
case ARM::VLD2DUPd16wb_register:
|
|
|
|
case ARM::VLD2DUPd32wb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD4DUPd8:
|
|
|
|
case ARM::VLD4DUPd16:
|
|
|
|
case ARM::VLD4DUPd32:
|
|
|
|
case ARM::VLD4DUPd8_UPD:
|
|
|
|
case ARM::VLD4DUPd16_UPD:
|
|
|
|
case ARM::VLD4DUPd32_UPD:
|
|
|
|
case ARM::VLD1LNd8:
|
|
|
|
case ARM::VLD1LNd16:
|
|
|
|
case ARM::VLD1LNd32:
|
|
|
|
case ARM::VLD1LNd8_UPD:
|
|
|
|
case ARM::VLD1LNd16_UPD:
|
|
|
|
case ARM::VLD1LNd32_UPD:
|
|
|
|
case ARM::VLD2LNd8:
|
|
|
|
case ARM::VLD2LNd16:
|
|
|
|
case ARM::VLD2LNd32:
|
|
|
|
case ARM::VLD2LNq16:
|
|
|
|
case ARM::VLD2LNq32:
|
|
|
|
case ARM::VLD2LNd8_UPD:
|
|
|
|
case ARM::VLD2LNd16_UPD:
|
|
|
|
case ARM::VLD2LNd32_UPD:
|
|
|
|
case ARM::VLD2LNq16_UPD:
|
|
|
|
case ARM::VLD2LNq32_UPD:
|
|
|
|
case ARM::VLD4LNd8:
|
|
|
|
case ARM::VLD4LNd16:
|
|
|
|
case ARM::VLD4LNd32:
|
|
|
|
case ARM::VLD4LNq16:
|
|
|
|
case ARM::VLD4LNq32:
|
|
|
|
case ARM::VLD4LNd8_UPD:
|
|
|
|
case ARM::VLD4LNd16_UPD:
|
|
|
|
case ARM::VLD4LNd32_UPD:
|
|
|
|
case ARM::VLD4LNq16_UPD:
|
|
|
|
case ARM::VLD4LNq32_UPD:
|
|
|
|
// If the address is not 64-bit aligned, the latencies of these
|
|
|
|
// instructions increases by one.
|
|
|
|
++Latency;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-10-28 14:47:08 +08:00
|
|
|
return Latency;
|
2010-10-06 14:27:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// SelectionDAG variant of getOperandLatency: compute the latency between the
// value produced by DefNode at result DefIdx and its read by UseNode at
// operand UseIdx. Mirrors the MachineInstr variant, including the A8/A9
// shifter-operand discount and the A9 unaligned-VLD penalty, but operating on
// the pseudo opcodes that exist before NEON pseudo expansion.
int
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                    SDNode *DefNode, unsigned DefIdx,
                                    SDNode *UseNode, unsigned UseIdx) const {
  // Non-machine nodes (constants, etc.) are modeled with unit latency.
  if (!DefNode->isMachineOpcode())
    return 1;

  const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());

  // Zero-cost instructions (e.g. subreg extracts) produce their value for
  // free.
  if (isZeroCost(DefMCID.Opcode))
    return 0;

  // Without itinerary data, use a rough default: loads take 3, else 1.
  if (!ItinData || ItinData->isEmpty())
    return DefMCID.mayLoad() ? 3 : 1;

  if (!UseNode->isMachineOpcode()) {
    // Use is a non-machine node (e.g. CopyToReg): derive a conservative
    // latency from the def cycle alone, with a per-core discount.
    int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
    if (Subtarget.isCortexA9())
      return Latency <= 2 ? 1 : Latency - 1;
    else
      return Latency <= 3 ? 1 : Latency - 2;
  }

  const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
  const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
  unsigned DefAlign = !DefMN->memoperands_empty()
    ? (*DefMN->memoperands_begin())->getAlignment() : 0;
  const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
  unsigned UseAlign = !UseMN->memoperands_empty()
    ? (*UseMN->memoperands_begin())->getAlignment() : 0;
  int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
                                  UseMCID, UseIdx, UseAlign);

  if (Latency > 1 &&
      (Subtarget.isCortexA8() || Subtarget.isCortexA9())) {
    // FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
    // variants are one cycle cheaper.
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::LDRrs:
    case ARM::LDRBrs: {
      // NOTE: on SDNodes the shifter-op immediate is operand 2 (vs operand 3
      // on the corresponding MachineInstr).
      unsigned ShOpVal =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
      if (ShImm == 0 ||
          (ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
        --Latency;
      break;
    }
    case ARM::t2LDRs:
    case ARM::t2LDRBs:
    case ARM::t2LDRHs:
    case ARM::t2LDRSHs: {
      // Thumb2 mode: lsl only.
      unsigned ShAmt =
        cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
      if (ShAmt == 0 || ShAmt == 2)
        --Latency;
      break;
    }
    }
  }

  if (DefAlign < 8 && Subtarget.isCortexA9())
    switch (DefMCID.getOpcode()) {
    default: break;
    case ARM::VLD1q8Pseudo:
    case ARM::VLD1q16Pseudo:
    case ARM::VLD1q32Pseudo:
    case ARM::VLD1q64Pseudo:
    case ARM::VLD1q8PseudoWB_register:
    case ARM::VLD1q16PseudoWB_register:
    case ARM::VLD1q32PseudoWB_register:
    case ARM::VLD1q64PseudoWB_register:
    case ARM::VLD1q8PseudoWB_fixed:
    case ARM::VLD1q16PseudoWB_fixed:
    case ARM::VLD1q32PseudoWB_fixed:
    case ARM::VLD1q64PseudoWB_fixed:
    case ARM::VLD2d8Pseudo:
    case ARM::VLD2d16Pseudo:
    case ARM::VLD2d32Pseudo:
    case ARM::VLD2q8Pseudo:
    case ARM::VLD2q16Pseudo:
    case ARM::VLD2q32Pseudo:
    case ARM::VLD2d8PseudoWB_fixed:
    case ARM::VLD2d16PseudoWB_fixed:
    case ARM::VLD2d32PseudoWB_fixed:
    case ARM::VLD2q8PseudoWB_fixed:
    case ARM::VLD2q16PseudoWB_fixed:
    case ARM::VLD2q32PseudoWB_fixed:
    case ARM::VLD2d8PseudoWB_register:
    case ARM::VLD2d16PseudoWB_register:
    case ARM::VLD2d32PseudoWB_register:
    case ARM::VLD2q8PseudoWB_register:
    case ARM::VLD2q16PseudoWB_register:
    case ARM::VLD2q32PseudoWB_register:
    case ARM::VLD3d8Pseudo:
    case ARM::VLD3d16Pseudo:
    case ARM::VLD3d32Pseudo:
    case ARM::VLD1d64TPseudo:
    case ARM::VLD3d8Pseudo_UPD:
    case ARM::VLD3d16Pseudo_UPD:
    case ARM::VLD3d32Pseudo_UPD:
    case ARM::VLD3q8Pseudo_UPD:
    case ARM::VLD3q16Pseudo_UPD:
    case ARM::VLD3q32Pseudo_UPD:
    case ARM::VLD3q8oddPseudo:
    case ARM::VLD3q16oddPseudo:
    case ARM::VLD3q32oddPseudo:
    case ARM::VLD3q8oddPseudo_UPD:
    case ARM::VLD3q16oddPseudo_UPD:
    case ARM::VLD3q32oddPseudo_UPD:
    case ARM::VLD4d8Pseudo:
    case ARM::VLD4d16Pseudo:
    case ARM::VLD4d32Pseudo:
    case ARM::VLD1d64QPseudo:
    case ARM::VLD4d8Pseudo_UPD:
    case ARM::VLD4d16Pseudo_UPD:
    case ARM::VLD4d32Pseudo_UPD:
    case ARM::VLD4q8Pseudo_UPD:
    case ARM::VLD4q16Pseudo_UPD:
    case ARM::VLD4q32Pseudo_UPD:
    case ARM::VLD4q8oddPseudo:
    case ARM::VLD4q16oddPseudo:
    case ARM::VLD4q32oddPseudo:
    case ARM::VLD4q8oddPseudo_UPD:
    case ARM::VLD4q16oddPseudo_UPD:
    case ARM::VLD4q32oddPseudo_UPD:
    case ARM::VLD1DUPq8Pseudo:
    case ARM::VLD1DUPq16Pseudo:
    case ARM::VLD1DUPq32Pseudo:
    case ARM::VLD1DUPq8PseudoWB_fixed:
    case ARM::VLD1DUPq16PseudoWB_fixed:
    case ARM::VLD1DUPq32PseudoWB_fixed:
    case ARM::VLD1DUPq8PseudoWB_register:
    case ARM::VLD1DUPq16PseudoWB_register:
    case ARM::VLD1DUPq32PseudoWB_register:
    case ARM::VLD2DUPd8Pseudo:
    case ARM::VLD2DUPd16Pseudo:
    case ARM::VLD2DUPd32Pseudo:
    case ARM::VLD2DUPd8PseudoWB_fixed:
    case ARM::VLD2DUPd16PseudoWB_fixed:
    case ARM::VLD2DUPd32PseudoWB_fixed:
    case ARM::VLD2DUPd8PseudoWB_register:
    case ARM::VLD2DUPd16PseudoWB_register:
    case ARM::VLD2DUPd32PseudoWB_register:
    case ARM::VLD4DUPd8Pseudo:
    case ARM::VLD4DUPd16Pseudo:
    case ARM::VLD4DUPd32Pseudo:
    case ARM::VLD4DUPd8Pseudo_UPD:
    case ARM::VLD4DUPd16Pseudo_UPD:
    case ARM::VLD4DUPd32Pseudo_UPD:
    case ARM::VLD1LNq8Pseudo:
    case ARM::VLD1LNq16Pseudo:
    case ARM::VLD1LNq32Pseudo:
    case ARM::VLD1LNq8Pseudo_UPD:
    case ARM::VLD1LNq16Pseudo_UPD:
    case ARM::VLD1LNq32Pseudo_UPD:
    case ARM::VLD2LNd8Pseudo:
    case ARM::VLD2LNd16Pseudo:
    case ARM::VLD2LNd32Pseudo:
    case ARM::VLD2LNq16Pseudo:
    case ARM::VLD2LNq32Pseudo:
    case ARM::VLD2LNd8Pseudo_UPD:
    case ARM::VLD2LNd16Pseudo_UPD:
    case ARM::VLD2LNd32Pseudo_UPD:
    case ARM::VLD2LNq16Pseudo_UPD:
    case ARM::VLD2LNq32Pseudo_UPD:
    case ARM::VLD4LNd8Pseudo:
    case ARM::VLD4LNd16Pseudo:
    case ARM::VLD4LNd32Pseudo:
    case ARM::VLD4LNq16Pseudo:
    case ARM::VLD4LNq32Pseudo:
    case ARM::VLD4LNd8Pseudo_UPD:
    case ARM::VLD4LNd16Pseudo_UPD:
    case ARM::VLD4LNd32Pseudo_UPD:
    case ARM::VLD4LNq16Pseudo_UPD:
    case ARM::VLD4LNq32Pseudo_UPD:
      // If the address is not 64-bit aligned, the latencies of these
      // instructions increases by one.
      ++Latency;
      break;
    }

  return Latency;
}
|
2010-10-20 02:58:51 +08:00
|
|
|
|
2011-12-15 04:00:08 +08:00
|
|
|
unsigned
|
|
|
|
ARMBaseInstrInfo::getOutputLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr *DefMI, unsigned DefIdx,
|
|
|
|
const MachineInstr *DepMI) const {
|
|
|
|
unsigned Reg = DefMI->getOperand(DefIdx).getReg();
|
|
|
|
if (DepMI->readsRegister(Reg, &getRegisterInfo()) || !isPredicated(DepMI))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
// If the second MI is predicated, then there is an implicit use dependency.
|
|
|
|
return getOperandLatency(ItinData, DefMI, DefIdx, DepMI,
|
|
|
|
DepMI->getNumOperands());
|
|
|
|
}
|
|
|
|
|
2010-11-03 08:45:17 +08:00
|
|
|
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr *MI,
|
|
|
|
unsigned *PredCost) const {
|
|
|
|
if (MI->isCopyLike() || MI->isInsertSubreg() ||
|
|
|
|
MI->isRegSequence() || MI->isImplicitDef())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return 1;
|
|
|
|
|
2011-12-14 10:11:42 +08:00
|
|
|
if (MI->isBundle()) {
|
|
|
|
int Latency = 0;
|
|
|
|
MachineBasicBlock::const_instr_iterator I = MI;
|
|
|
|
MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
|
|
|
|
while (++I != E && I->isInsideBundle()) {
|
|
|
|
if (I->getOpcode() != ARM::t2IT)
|
|
|
|
Latency += getInstrLatency(ItinData, I, PredCost);
|
|
|
|
}
|
|
|
|
return Latency;
|
|
|
|
}
|
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &MCID = MI->getDesc();
|
|
|
|
unsigned Class = MCID.getSchedClass();
|
2010-11-03 08:45:17 +08:00
|
|
|
unsigned UOps = ItinData->Itineraries[Class].NumMicroOps;
|
2012-02-18 03:07:59 +08:00
|
|
|
if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR)))
|
2010-11-03 08:45:17 +08:00
|
|
|
// When predicated, CPSR is an additional source operand for CPSR updating
|
|
|
|
// instructions, this apparently increases their latencies.
|
|
|
|
*PredCost = 1;
|
|
|
|
if (UOps)
|
|
|
|
return ItinData->getStageLatency(Class);
|
|
|
|
return getNumMicroOps(ItinData, MI);
|
|
|
|
}
|
|
|
|
|
|
|
|
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
|
|
|
|
SDNode *Node) const {
|
|
|
|
if (!Node->isMachineOpcode())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
unsigned Opcode = Node->getMachineOpcode();
|
|
|
|
switch (Opcode) {
|
|
|
|
default:
|
|
|
|
return ItinData->getStageLatency(get(Opcode).getSchedClass());
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::VLDMQIA:
|
|
|
|
case ARM::VSTMQIA:
|
2010-11-03 08:45:17 +08:00
|
|
|
return 2;
|
2010-11-19 03:40:05 +08:00
|
|
|
}
|
2010-11-03 08:45:17 +08:00
|
|
|
}
|
|
|
|
|
2010-10-20 02:58:51 +08:00
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
hasHighOperandLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineRegisterInfo *MRI,
|
|
|
|
const MachineInstr *DefMI, unsigned DefIdx,
|
|
|
|
const MachineInstr *UseMI, unsigned UseIdx) const {
|
|
|
|
unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
|
|
|
|
unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask;
|
|
|
|
if (Subtarget.isCortexA8() &&
|
|
|
|
(DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
|
|
|
|
// CortexA8 VFP instructions are not pipelined.
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Hoist VFP / NEON instructions with 4 or higher latency.
|
|
|
|
int Latency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
|
|
|
|
if (Latency <= 3)
|
|
|
|
return false;
|
|
|
|
return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
|
|
|
|
UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
|
|
|
|
}
|
2010-10-26 10:08:50 +08:00
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
hasLowDefLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr *DefMI, unsigned DefIdx) const {
|
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
|
|
|
|
if (DDomain == ARMII::DomainGeneral) {
|
|
|
|
unsigned DefClass = DefMI->getDesc().getSchedClass();
|
|
|
|
int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
|
|
|
|
return (DefCycle != -1 && DefCycle <= 2);
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2010-12-06 06:04:16 +08:00
|
|
|
|
2011-09-21 10:20:46 +08:00
|
|
|
bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
|
|
|
|
StringRef &ErrInfo) const {
|
|
|
|
if (convertAddSubFlagsOpcode(MI->getOpcode())) {
|
|
|
|
ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-12-06 06:04:16 +08:00
|
|
|
bool
|
|
|
|
ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
|
|
|
|
unsigned &AddSubOpc,
|
|
|
|
bool &NegAcc, bool &HasLane) const {
|
|
|
|
DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
|
|
|
|
if (I == MLxEntryMap.end())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
|
|
|
|
MulOpc = Entry.MulOpc;
|
|
|
|
AddSubOpc = Entry.AddSubOpc;
|
|
|
|
NegAcc = Entry.NegAcc;
|
|
|
|
HasLane = Entry.HasLane;
|
|
|
|
return true;
|
|
|
|
}
|
2011-09-28 06:57:21 +08:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
// Execution domains.
//===----------------------------------------------------------------------===//
//
// Some instructions execute only on the NEON pipeline, some only on the VFP
// pipeline, and some can run on either. The vmov instructions execute on the
// VFP pipeline, but they can be rewritten as vorr equivalents that the NEON
// pipeline executes instead.
//
// The execution domains are numbered as follows:
//
enum ARMExeDomain {
  ExeGeneric = 0,
  ExeVFP = 1,
  ExeNEON = 2
};
|
2011-09-28 06:57:21 +08:00
|
|
|
//
|
|
|
|
// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
|
|
|
|
//
|
|
|
|
std::pair<uint16_t, uint16_t>
|
|
|
|
ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
|
|
|
|
// VMOVD is a VFP instruction, but can be changed to NEON if it isn't
|
|
|
|
// predicated.
|
|
|
|
if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));
|
2011-09-28 06:57:21 +08:00
|
|
|
|
|
|
|
// No other instructions can be swizzled, so just determine their domain.
|
|
|
|
unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask;
|
|
|
|
|
|
|
|
if (Domain & ARMII::DomainNEON)
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeNEON, 0);
|
2011-09-28 06:57:21 +08:00
|
|
|
|
|
|
|
// Certain instructions can go either way on Cortex-A8.
|
|
|
|
// Treat them as NEON instructions.
|
|
|
|
if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeNEON, 0);
|
2011-09-28 06:57:21 +08:00
|
|
|
|
|
|
|
if (Domain & ARMII::DomainVFP)
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeVFP, 0);
|
2011-09-28 06:57:21 +08:00
|
|
|
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeGeneric, 0);
|
2011-09-28 06:57:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
|
|
|
|
// We only know how to change VMOVD into VORR.
|
|
|
|
assert(MI->getOpcode() == ARM::VMOVD && "Can only swizzle VMOVD");
|
2011-09-29 10:48:41 +08:00
|
|
|
if (Domain != ExeNEON)
|
2011-09-28 06:57:21 +08:00
|
|
|
return;
|
|
|
|
|
2011-09-29 10:48:41 +08:00
|
|
|
// Zap the predicate operands.
|
|
|
|
assert(!isPredicated(MI) && "Cannot predicate a VORRd");
|
|
|
|
MI->RemoveOperand(3);
|
|
|
|
MI->RemoveOperand(2);
|
|
|
|
|
2011-09-28 06:57:21 +08:00
|
|
|
// Change to a VORRd which requires two identical use operands.
|
|
|
|
MI->setDesc(get(ARM::VORRd));
|
2011-09-29 10:48:41 +08:00
|
|
|
|
|
|
|
// Add the extra source operand and new predicates.
|
|
|
|
// This will go before any implicit ops.
|
2011-10-12 08:06:23 +08:00
|
|
|
AddDefaultPred(MachineInstrBuilder(MI).addOperand(MI->getOperand(1)));
|
2011-09-28 06:57:21 +08:00
|
|
|
}
|