//===-- ARMBaseInstrInfo.cpp - ARM Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMConstantPoolValue.h"
#include "ARMFeatures.h"
#include "ARMHazardRecognizer.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_INSTRINFO_CTOR_DTOR
#include "ARMGenInstrInfo.inc"

using namespace llvm;

static cl::opt<bool>
EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden,
               cl::desc("Enable ARM 2-addr to 3-addr conv"));

static cl::opt<bool>
WidenVMOVS("widen-vmovs", cl::Hidden, cl::init(true),
           cl::desc("Widen ARM vmovs to vmovd when possible"));

static cl::opt<unsigned>
SwiftPartialUpdateClearance("swift-partial-update-clearance",
                            cl::Hidden, cl::init(12),
                            cl::desc("Clearance before partial register updates"));

/// ARM_MLxEntry - Record information about MLA / MLS instructions.
struct ARM_MLxEntry {
  uint16_t MLxOpc;     // MLA / MLS opcode
  uint16_t MulOpc;     // Expanded multiplication opcode
  uint16_t AddSubOpc;  // Expanded add / sub opcode
  bool NegAcc;         // True if the acc is negated before the add / sub.
  bool HasLane;        // True if instruction has an extra "lane" operand.
};

static const ARM_MLxEntry ARM_MLxTable[] = {
  // MLxOpc,          MulOpc,           AddSubOpc,       NegAcc, HasLane
  // fp scalar ops
  { ARM::VMLAS,       ARM::VMULS,       ARM::VADDS,      false,  false },
  { ARM::VMLSS,       ARM::VMULS,       ARM::VSUBS,      false,  false },
  { ARM::VMLAD,       ARM::VMULD,       ARM::VADDD,      false,  false },
  { ARM::VMLSD,       ARM::VMULD,       ARM::VSUBD,      false,  false },
  { ARM::VNMLAS,      ARM::VNMULS,      ARM::VSUBS,      true,   false },
  { ARM::VNMLSS,      ARM::VMULS,       ARM::VSUBS,      true,   false },
  { ARM::VNMLAD,      ARM::VNMULD,      ARM::VSUBD,      true,   false },
  { ARM::VNMLSD,      ARM::VMULD,       ARM::VSUBD,      true,   false },

  // fp SIMD ops
  { ARM::VMLAfd,      ARM::VMULfd,      ARM::VADDfd,     false,  false },
  { ARM::VMLSfd,      ARM::VMULfd,      ARM::VSUBfd,     false,  false },
  { ARM::VMLAfq,      ARM::VMULfq,      ARM::VADDfq,     false,  false },
  { ARM::VMLSfq,      ARM::VMULfq,      ARM::VSUBfq,     false,  false },
  { ARM::VMLAslfd,    ARM::VMULslfd,    ARM::VADDfd,     false,  true  },
  { ARM::VMLSslfd,    ARM::VMULslfd,    ARM::VSUBfd,     false,  true  },
  { ARM::VMLAslfq,    ARM::VMULslfq,    ARM::VADDfq,     false,  true  },
  { ARM::VMLSslfq,    ARM::VMULslfq,    ARM::VSUBfq,     false,  true  },
};

ARMBaseInstrInfo::ARMBaseInstrInfo(const ARMSubtarget& STI)
  : ARMGenInstrInfo(ARM::ADJCALLSTACKDOWN, ARM::ADJCALLSTACKUP),
    Subtarget(STI) {
  for (unsigned i = 0, e = array_lengthof(ARM_MLxTable); i != e; ++i) {
    if (!MLxEntryMap.insert(std::make_pair(ARM_MLxTable[i].MLxOpc, i)).second)
      assert(false && "Duplicated entries?");
    MLxHazardOpcodes.insert(ARM_MLxTable[i].AddSubOpc);
    MLxHazardOpcodes.insert(ARM_MLxTable[i].MulOpc);
  }
}
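
// The constructor indexes ARM_MLxTable by MLA/MLS opcode so the table entry
// for an instruction can be looked up in constant time, and collects the
// expanded multiply and add/sub opcodes as candidates for MLx hazards.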

// Use a ScoreboardHazardRecognizer for prepass ARM scheduling. TargetInstrImpl
// currently defaults to no prepass hazard recognizer.
ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  if (usePreRAHazardRecognizer()) {
    const InstrItineraryData *II = TM->getInstrItineraryData();
    return new ScoreboardHazardRecognizer(II, DAG, "pre-RA-sched");
  }
  return TargetInstrInfo::CreateTargetHazardRecognizer(TM, DAG);
}

ScheduleHazardRecognizer *ARMBaseInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  if (Subtarget.isThumb2() || Subtarget.hasVFP2())
    return (ScheduleHazardRecognizer *)new ARMHazardRecognizer(II, DAG);
  return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
}
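
/// convertToThreeAddress - Split a pre/post-indexed load or store into an
/// unindexed memory instruction plus an explicit ADD/SUB that performs the
/// base-register update. Returns the first of the new instructions, or NULL
/// if the conversion is disabled or not possible.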
MachineInstr *
ARMBaseInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  // FIXME: Thumb2 support.

  if (!EnableARM3Addr)
    return NULL;

  MachineInstr *MI = MBBI;
  MachineFunction &MF = *MI->getParent()->getParent();
  uint64_t TSFlags = MI->getDesc().TSFlags;
  bool isPre = false;
  switch ((TSFlags & ARMII::IndexModeMask) >> ARMII::IndexModeShift) {
  default: return NULL;
  case ARMII::IndexModePre:
    isPre = true;
    break;
  case ARMII::IndexModePost:
    break;
  }

  // Try splitting an indexed load/store to an un-indexed one plus an add/sub
  // operation.
  unsigned MemOpc = getUnindexedOpcode(MI->getOpcode());
  if (MemOpc == 0)
    return NULL;

  MachineInstr *UpdateMI = NULL;
  MachineInstr *MemMI = NULL;
  unsigned AddrMode = (TSFlags & ARMII::AddrModeMask);
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumOps = MCID.getNumOperands();
  bool isLoad = !MI->mayStore();
  const MachineOperand &WB = isLoad ? MI->getOperand(1) : MI->getOperand(0);
  const MachineOperand &Base = MI->getOperand(2);
  const MachineOperand &Offset = MI->getOperand(NumOps-3);
  unsigned WBReg = WB.getReg();
  unsigned BaseReg = Base.getReg();
  unsigned OffReg = Offset.getReg();
  unsigned OffImm = MI->getOperand(NumOps-2).getImm();
  ARMCC::CondCodes Pred = (ARMCC::CondCodes)MI->getOperand(NumOps-1).getImm();
  switch (AddrMode) {
  default: llvm_unreachable("Unknown indexed op!");
  case ARMII::AddrMode2: {
    bool isSub = ARM_AM::getAM2Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM2Offset(OffImm);
    if (OffReg == 0) {
      if (ARM_AM::getSOImmVal(Amt) == -1)
        // Can't encode it in a so_imm operand. This transformation will
        // add more than 1 instruction. Abandon!
        return NULL;
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    } else if (Amt != 0) {
      ARM_AM::ShiftOpc ShOpc = ARM_AM::getAM2ShiftOpc(OffImm);
      unsigned SOOpc = ARM_AM::getSORegOpc(ShOpc, Amt);
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
        .addReg(BaseReg).addReg(OffReg).addReg(0).addImm(SOOpc)
        .addImm(Pred).addReg(0).addReg(0);
    } else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  case ARMII::AddrMode3 : {
    bool isSub = ARM_AM::getAM3Op(OffImm) == ARM_AM::sub;
    unsigned Amt = ARM_AM::getAM3Offset(OffImm);
    if (OffReg == 0)
      // Immediate is 8-bits. It's guaranteed to fit in a so_imm operand.
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
        .addReg(BaseReg).addImm(Amt)
        .addImm(Pred).addReg(0).addReg(0);
    else
      UpdateMI = BuildMI(MF, MI->getDebugLoc(),
                         get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
        .addReg(BaseReg).addReg(OffReg)
        .addImm(Pred).addReg(0).addReg(0);
    break;
  }
  }

  std::vector<MachineInstr*> NewMIs;
  if (isPre) {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(WBReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(WBReg).addReg(0).addImm(0).addImm(Pred);
    NewMIs.push_back(MemMI);
    NewMIs.push_back(UpdateMI);
  } else {
    if (isLoad)
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc), MI->getOperand(0).getReg())
        .addReg(BaseReg).addImm(0).addImm(Pred);
    else
      MemMI = BuildMI(MF, MI->getDebugLoc(),
                      get(MemOpc)).addReg(MI->getOperand(1).getReg())
        .addReg(BaseReg).addReg(0).addImm(0).addImm(Pred);
    if (WB.isDead())
      UpdateMI->getOperand(0).setIsDead();
    NewMIs.push_back(UpdateMI);
    NewMIs.push_back(MemMI);
  }

  // Transfer LiveVariables states, kill / dead info.
  if (LV) {
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned Reg = MO.getReg();

        LiveVariables::VarInfo &VI = LV->getVarInfo(Reg);
        if (MO.isDef()) {
          MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
          if (MO.isDead())
            LV->addVirtualRegisterDead(Reg, NewMI);
        }
        if (MO.isUse() && MO.isKill()) {
          for (unsigned j = 0; j < 2; ++j) {
            // Look at the two new MI's in reverse order.
            MachineInstr *NewMI = NewMIs[j];
            if (!NewMI->readsRegister(Reg))
              continue;
            LV->addVirtualRegisterKilled(Reg, NewMI);
            if (VI.removeKill(MI))
              VI.Kills.push_back(NewMI);
            break;
          }
        }
      }
    }
  }

  MFI->insert(MBBI, NewMIs[1]);
  MFI->insert(MBBI, NewMIs[0]);
  return NewMIs[0];
}

// Branch analysis.
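// AnalyzeBranch walks backwards over the terminators of MBB, recording the
// destination of a trailing unconditional branch in TBB and of a conditional
// branch in TBB/Cond (with the prior target moved to FBB). It returns true
// when the terminators cannot be analyzed.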
bool
ARMBaseInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  TBB = 0;
  FBB = 0;

  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false; // Empty blocks are easy.
  --I;

  // Walk backwards from the end of the basic block until the branch is
  // analyzed or we give up.
  while (isPredicated(I) || I->isTerminator() || I->isDebugValue()) {

    // Flag to be raised on unanalyzeable instructions. This is useful in cases
    // where we want to clean up on the end of the basic block before we bail
    // out.
    bool CantAnalyze = false;

    // Skip over DEBUG values and predicated nonterminators.
    while (I->isDebugValue() || !I->isTerminator()) {
      if (I == MBB.begin())
        return false;
      --I;
    }

    if (isIndirectBranchOpcode(I->getOpcode()) ||
        isJumpTableBranchOpcode(I->getOpcode())) {
      // Indirect branches and jump tables can't be analyzed, but we still want
      // to clean up any instructions at the tail of the basic block.
      CantAnalyze = true;
    } else if (isUncondBranchOpcode(I->getOpcode())) {
      TBB = I->getOperand(0).getMBB();
    } else if (isCondBranchOpcode(I->getOpcode())) {
      // Bail out if we encounter multiple conditional branches.
      if (!Cond.empty())
        return true;

      assert(!FBB && "FBB should have been null.");
      FBB = TBB;
      TBB = I->getOperand(0).getMBB();
      Cond.push_back(I->getOperand(1));
      Cond.push_back(I->getOperand(2));
    } else if (I->isReturn()) {
      // Returns can't be analyzed, but we should run cleanup.
      CantAnalyze = !isPredicated(I);
    } else {
      // We encountered an unrecognized terminator. Bail out immediately.
      return true;
    }

    // Cleanup code - to be run for unpredicated unconditional branches and
    // returns.
    if (!isPredicated(I) &&
        (isUncondBranchOpcode(I->getOpcode()) ||
         isIndirectBranchOpcode(I->getOpcode()) ||
         isJumpTableBranchOpcode(I->getOpcode()) ||
         I->isReturn())) {
      // Forget any previous conditional branch information - it no longer
      // applies.
      Cond.clear();
      FBB = 0;

      // If we can modify the function, delete everything below this
      // unconditional branch.
      if (AllowModify) {
        MachineBasicBlock::iterator DI = std::next(I);
        while (DI != MBB.end()) {
          MachineInstr *InstToDelete = DI;
          ++DI;
          InstToDelete->eraseFromParent();
        }
      }
    }

    if (CantAnalyze)
      return true;

    if (I == MBB.begin())
      return false;

    --I;
  }

  // We made it past the terminators without bailing out - we must have
  // analyzed this branch successfully.
  return false;
}
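
// RemoveBranch erases the trailing branch instructions of MBB (at most one
// unconditional and one conditional branch) and returns how many were
// removed.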
unsigned ARMBaseInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (!isUncondBranchOpcode(I->getOpcode()) &&
      !isCondBranchOpcode(I->getOpcode()))
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (!isCondBranchOpcode(I->getOpcode()))
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
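
// InsertBranch emits the branch sequence described by TBB/FBB/Cond at the end
// of MBB, choosing the ARM, Thumb1, or Thumb2 opcode as appropriate, and
// returns the number of instructions inserted.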
unsigned
ARMBaseInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  ARMFunctionInfo *AFI = MBB.getParent()->getInfo<ARMFunctionInfo>();
  int BOpc   = !AFI->isThumbFunction()
    ? ARM::B : (AFI->isThumb2Function() ? ARM::t2B : ARM::tB);
  int BccOpc = !AFI->isThumbFunction()
    ? ARM::Bcc : (AFI->isThumb2Function() ? ARM::t2Bcc : ARM::tBcc);
  bool isThumb = AFI->isThumbFunction() || AFI->isThumb2Function();

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "ARM branch conditions have two components!");

  if (FBB == 0) {
    if (Cond.empty()) { // Unconditional branch?
      if (isThumb)
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB).addImm(ARMCC::AL).addReg(0);
      else
        BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else
      BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
    return 1;
  }

  // Two-way conditional branch.
  BuildMI(&MBB, DL, get(BccOpc)).addMBB(TBB)
    .addImm(Cond[0].getImm()).addReg(Cond[1].getReg());
  if (isThumb)
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB).addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
  return 2;
}

bool ARMBaseInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  ARMCC::CondCodes CC = (ARMCC::CondCodes)(int)Cond[0].getImm();
  Cond[0].setImm(ARMCC::getOppositeCondition(CC));
  return false;
}
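
// An instruction bundle is considered predicated if any instruction inside
// the bundle carries a predicate other than AL.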
bool ARMBaseInstrInfo::isPredicated(const MachineInstr *MI) const {
  if (MI->isBundle()) {
    MachineBasicBlock::const_instr_iterator I = MI;
    MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
    while (++I != E && I->isInsideBundle()) {
      int PIdx = I->findFirstPredOperandIdx();
      if (PIdx != -1 && I->getOperand(PIdx).getImm() != ARMCC::AL)
        return true;
    }
    return false;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  return PIdx != -1 && MI->getOperand(PIdx).getImm() != ARMCC::AL;
}

bool ARMBaseInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned Opc = MI->getOpcode();
  if (isUncondBranchOpcode(Opc)) {
    MI->setDesc(get(getMatchingCondBranchOpcode(Opc)));
    MachineInstrBuilder(*MI->getParent()->getParent(), MI)
      .addImm(Pred[0].getImm())
      .addReg(Pred[1].getReg());
    return true;
  }

  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx != -1) {
    MachineOperand &PMO = MI->getOperand(PIdx);
    PMO.setImm(Pred[0].getImm());
    MI->getOperand(PIdx+1).setReg(Pred[1].getReg());
    return true;
  }
  return false;
}
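
// SubsumesPredicate returns true when the first predicate is at least as
// permissive as the second; for example, AL subsumes every condition and HS
// subsumes HI.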
bool ARMBaseInstrInfo::
SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                  const SmallVectorImpl<MachineOperand> &Pred2) const {
  if (Pred1.size() > 2 || Pred2.size() > 2)
    return false;

  ARMCC::CondCodes CC1 = (ARMCC::CondCodes)Pred1[0].getImm();
  ARMCC::CondCodes CC2 = (ARMCC::CondCodes)Pred2[0].getImm();
  if (CC1 == CC2)
    return true;

  switch (CC1) {
  default:
    return false;
  case ARMCC::AL:
    return true;
  case ARMCC::HS:
    return CC2 == ARMCC::HI;
  case ARMCC::LS:
    return CC2 == ARMCC::LO || CC2 == ARMCC::EQ;
  case ARMCC::GE:
    return CC2 == ARMCC::GT;
  case ARMCC::LE:
    return CC2 == ARMCC::LT;
  }
}
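
// DefinesPredicate collects every operand that writes CPSR, including regmask
// clobbers, since any of them redefines the ARM predicate state.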
bool ARMBaseInstrInfo::DefinesPredicate(MachineInstr *MI,
                                        std::vector<MachineOperand> &Pred) const {
  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if ((MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) ||
        (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR)) {
      Pred.push_back(MO);
      Found = true;
    }
  }

  return Found;
}

/// isPredicable - Return true if the specified instruction can be predicated.
/// By default, this returns true for every instruction with a
/// PredicateOperand.
bool ARMBaseInstrInfo::isPredicable(MachineInstr *MI) const {
  if (!MI->isPredicable())
    return false;

  ARMFunctionInfo *AFI =
    MI->getParent()->getParent()->getInfo<ARMFunctionInfo>();

  if (AFI->isThumb2Function()) {
    if (getSubtarget().restrictIT())
      return isV8EligibleForIT(MI);
  } else { // non-Thumb
    if ((MI->getDesc().TSFlags & ARMII::DomainMask) == ARMII::DomainNEON)
      return false;
  }

  return true;
}

template<> bool IsCPSRDead<MachineInstr>(MachineInstr* MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    if (!MO.isDead())
      return false;
  }
  // all definitions of CPSR are dead
  return true;
}

/// FIXME: Works around a gcc miscompilation with -fstrict-aliasing.
LLVM_ATTRIBUTE_NOINLINE
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI);
static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
                                unsigned JTI) {
  assert(JTI < JT.size());
  return JT[JTI].MBBs.size();
}

/// GetInstSize - Return the size of the specified MachineInstr.
///
unsigned ARMBaseInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  const MachineBasicBlock &MBB = *MI->getParent();
  const MachineFunction *MF = MBB.getParent();
  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();

  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getSize())
    return MCID.getSize();

  // If this machine instr is an inline asm, measure it.
  if (MI->getOpcode() == ARM::INLINEASM)
    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
  unsigned Opc = MI->getOpcode();
  switch (Opc) {
  default:
    // Pseudo-instruction sizes are zero.
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleLength(MI);
  case ARM::MOVi16_ga_pcrel:
  case ARM::MOVTi16_ga_pcrel:
  case ARM::t2MOVi16_ga_pcrel:
  case ARM::t2MOVTi16_ga_pcrel:
    return 4;
  case ARM::MOVi32imm:
  case ARM::t2MOVi32imm:
    return 8;
  case ARM::CONSTPOOL_ENTRY:
    // If this machine instr is a constant pool entry, its size is recorded as
    // operand #2.
    return MI->getOperand(2).getImm();
  case ARM::Int_eh_sjlj_longjmp:
    return 16;
  case ARM::tInt_eh_sjlj_longjmp:
    return 10;
  case ARM::Int_eh_sjlj_setjmp:
  case ARM::Int_eh_sjlj_setjmp_nofp:
    return 20;
  case ARM::tInt_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp:
  case ARM::t2Int_eh_sjlj_setjmp_nofp:
    return 12;
  case ARM::BR_JTr:
  case ARM::BR_JTm:
  case ARM::BR_JTadd:
  case ARM::tBR_JTr:
  case ARM::t2BR_JT:
  case ARM::t2TBB_JT:
  case ARM::t2TBH_JT: {
    // These are jumptable branches, i.e. a branch followed by an inlined
    // jumptable. The size is 4 + 4 * number of entries. For TBB, each
    // entry is one byte; each TBH entry is two bytes.
    unsigned EntrySize = (Opc == ARM::t2TBB_JT)
      ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
    unsigned NumOps = MCID.getNumOperands();
    MachineOperand JTOP =
      MI->getOperand(NumOps - (MI->isPredicable() ? 3 : 2));
    unsigned JTI = JTOP.getIndex();
    const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
    assert(MJTI != 0);
    const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
    assert(JTI < JT.size());
    // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
    // aligned. The assembler / linker may add 2 byte padding just before
    // the JT entries. The size does not include this padding; the
    // constant islands pass does separate bookkeeping for it.
    // FIXME: If we know the size of the function is less than (1 << 16) * 2
    // bytes, we can use 16-bit entries instead. Then there won't be an
    // alignment issue.
    unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
    unsigned NumEntries = getNumJTEntries(JT, JTI);
    if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
      // Make sure the instruction that follows TBB is 2-byte aligned.
      // FIXME: Constant island pass should insert an "ALIGN" instruction
      // instead.
      ++NumEntries;
    return NumEntries * EntrySize + InstSize;
  }
  }
}
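
// getInstBundleLength sums the sizes of all instructions inside a bundle,
// excluding the BUNDLE header itself.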
unsigned ARMBaseInstrInfo::getInstBundleLength(const MachineInstr *MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI;
  MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += GetInstSizeInBytes(&*I);
  }
  return Size;
}
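
// copyPhysReg picks the cheapest legal copy for the register classes
// involved: MOVr for GPRs, VMOVS/VMOVRS/VMOVSR/VMOVD for FP registers, VORRq
// for full Q registers, and decomposes larger register tuples into multiple
// sub-register copies.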
void ARMBaseInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  bool GPRDest = ARM::GPRRegClass.contains(DestReg);
  bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);

  if (GPRDest && GPRSrc) {
    AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
                                    .addReg(SrcReg, getKillRegState(KillSrc))));
    return;
  }

  bool SPRDest = ARM::SPRRegClass.contains(DestReg);
  bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);

  unsigned Opc = 0;
  if (SPRDest && SPRSrc)
    Opc = ARM::VMOVS;
  else if (GPRDest && SPRSrc)
    Opc = ARM::VMOVRS;
  else if (SPRDest && GPRSrc)
    Opc = ARM::VMOVSR;
  else if (ARM::DPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VMOVD;
  else if (ARM::QPRRegClass.contains(DestReg, SrcReg))
    Opc = ARM::VORRq;

  if (Opc) {
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc), DestReg);
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
    if (Opc == ARM::VORRq)
      MIB.addReg(SrcReg, getKillRegState(KillSrc));
    AddDefaultPred(MIB);
    return;
  }

  // Handle register classes that require multiple instructions.
  unsigned BeginIdx = 0;
  unsigned SubRegs = 0;
  int Spacing = 1;

  // Use VORRq when possible.
  if (ARM::QQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VORRq;
    BeginIdx = ARM::qsub_0;
    SubRegs = 2;
  } else if (ARM::QQQQPRRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VORRq;
    BeginIdx = ARM::qsub_0;
    SubRegs = 4;
  // Fall back to VMOVD.
  } else if (ARM::DPairRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
  } else if (ARM::DTripleRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
  } else if (ARM::DQuadRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
  } else if (ARM::GPRPairRegClass.contains(DestReg, SrcReg)) {
    Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
    BeginIdx = ARM::gsub_0;
    SubRegs = 2;
  } else if (ARM::DPairSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 2;
    Spacing = 2;
  } else if (ARM::DTripleSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 3;
    Spacing = 2;
  } else if (ARM::DQuadSpcRegClass.contains(DestReg, SrcReg)) {
    Opc = ARM::VMOVD;
    BeginIdx = ARM::dsub_0;
    SubRegs = 4;
    Spacing = 2;
  }

  assert(Opc && "Impossible reg-to-reg copy");

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineInstrBuilder Mov;

  // Copy register tuples backward when the first Dest reg overlaps with SrcReg.
  if (TRI->regsOverlap(SrcReg, TRI->getSubReg(DestReg, BeginIdx))) {
    BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
    Spacing = -Spacing;
  }
#ifndef NDEBUG
  SmallSet<unsigned, 4> DstRegs;
#endif
  for (unsigned i = 0; i != SubRegs; ++i) {
    unsigned Dst = TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
    unsigned Src = TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
    assert(Dst && Src && "Bad sub-register");
#ifndef NDEBUG
    assert(!DstRegs.count(Src) && "destructive vector copy");
    DstRegs.insert(Dst);
#endif
    Mov = BuildMI(MBB, I, I->getDebugLoc(), get(Opc), Dst).addReg(Src);
    // VORR takes two source operands.
    if (Opc == ARM::VORRq)
      Mov.addReg(Src);
    Mov = AddDefaultPred(Mov);
    // MOVr can set CC.
    if (Opc == ARM::MOVr)
      Mov = AddDefaultCC(Mov);
  }
  // Add implicit super-register defs and kills to the last instruction.
  Mov->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    Mov->addRegisterKilled(SrcReg, TRI);
}
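
// AddDReg appends the SubIdx sub-register of Reg to MIB. For virtual
// registers the sub-register index is attached to the operand rather than
// resolved through TRI.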
const MachineInstrBuilder &
ARMBaseInstrInfo::AddDReg(MachineInstrBuilder &MIB, unsigned Reg,
                          unsigned SubIdx, unsigned State,
                          const TargetRegisterInfo *TRI) const {
  if (!SubIdx)
    return MIB.addReg(Reg, State);

  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return MIB.addReg(TRI->getSubReg(Reg, SubIdx), State);
  return MIB.addReg(Reg, State, SubIdx);
}
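
// storeRegToStackSlot selects a spill instruction by register-class size,
// preferring 16-byte aligned NEON stores (VST1) when the stack slot can be
// realigned and falling back to VSTM/STM sequences otherwise.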
void ARMBaseInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                            MachineMemOperand::MOStore,
                            MFI.getObjectSize(FI),
                            Align);

  switch (RC->getSize()) {
    case 4:
      if (ARM::GPRRegClass.hasSubClassEq(RC)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STRi12))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
      } else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRS))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
      } else
        llvm_unreachable("Unknown reg class!");
      break;
    case 8:
      if (ARM::DPRRegClass.hasSubClassEq(RC)) {
        AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTRD))
                   .addReg(SrcReg, getKillRegState(isKill))
                   .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
      } else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
        if (Subtarget.hasV5TEOps()) {
          MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::STRD));
          AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
          AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
          MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO);

          AddDefaultPred(MIB);
        } else {
          // Fallback to STM instruction, which has existed since the dawn of
          // time.
          MachineInstrBuilder MIB =
            AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STMIA))
                             .addFrameIndex(FI).addMemOperand(MMO));
          AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
          AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
        }
      } else
        llvm_unreachable("Unknown reg class!");
      break;
    case 16:
      if (ARM::DPairRegClass.hasSubClassEq(RC)) {
        // Use aligned spills if the stack can be realigned.
        if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1q64))
                     .addFrameIndex(FI).addImm(16)
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addMemOperand(MMO));
        } else {
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMQIA))
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addFrameIndex(FI)
                     .addMemOperand(MMO));
        }
      } else
        llvm_unreachable("Unknown reg class!");
      break;
    case 24:
      if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
        // Use aligned spills if the stack can be realigned.
        if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64TPseudo))
                     .addFrameIndex(FI).addImm(16)
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addMemOperand(MMO));
        } else {
          MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
          MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
          MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
          AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
        }
      } else
        llvm_unreachable("Unknown reg class!");
      break;
    case 32:
      if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
          ARM::DQuadRegClass.hasSubClassEq(RC)) {
        if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
          // FIXME: It's possible to only store part of the QQ register if the
          // spilled def has a sub-register index.
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VST1d64QPseudo))
                     .addFrameIndex(FI).addImm(16)
                     .addReg(SrcReg, getKillRegState(isKill))
                     .addMemOperand(MMO));
        } else {
          MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                       .addFrameIndex(FI))
                       .addMemOperand(MMO);
          MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
          MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
          MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
          AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
        }
      } else
        llvm_unreachable("Unknown reg class!");
      break;
    case 64:
      if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
        MachineInstrBuilder MIB =
          AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VSTMDIA))
                         .addFrameIndex(FI))
                         .addMemOperand(MMO);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_0, getKillRegState(isKill), TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_1, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_2, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_3, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_4, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_5, 0, TRI);
        MIB = AddDReg(MIB, SrcReg, ARM::dsub_6, 0, TRI);
        AddDReg(MIB, SrcReg, ARM::dsub_7, 0, TRI);
      } else
        llvm_unreachable("Unknown reg class!");
      break;
    default:
      llvm_unreachable("Unknown reg class!");
  }
}
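
// isStoreToStackSlot recognizes direct frame-index stores (zero offset, no
// index register) and reports the stored register and frame index.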
unsigned
ARMBaseInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                     int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::STRrs:
  case ARM::t2STRs: // FIXME: don't use t2STRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::STRi12:
  case ARM::t2STRi12:
  case ARM::tSTRspi:
  case ARM::VSTRD:
  case ARM::VSTRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VST1q64:
  case ARM::VST1d64TPseudo:
  case ARM::VST1d64QPseudo:
    if (MI->getOperand(0).isFI() &&
        MI->getOperand(2).getSubReg() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(2).getReg();
    }
    break;
  case ARM::VSTMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayStore() && hasStoreToStackSlot(MI, Dummy, FrameIndex);
}
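
// loadRegFromStackSlot mirrors storeRegToStackSlot: reloads are selected by
// register-class size, using aligned VLD1 forms when the slot can be
// realigned and VLDM/LDM sequences otherwise.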
|
|
|
|
|
2009-07-09 00:09:28 +08:00
|
|
|
void ARMBaseInstrInfo::
|
|
|
|
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
|
|
|
unsigned DestReg, int FI,
|
2010-05-07 03:06:44 +08:00
|
|
|
const TargetRegisterClass *RC,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
2010-04-03 04:16:16 +08:00
|
|
|
DebugLoc DL;
|
2009-07-09 00:09:28 +08:00
|
|
|
if (I != MBB.end()) DL = I->getDebugLoc();
|
2009-10-07 08:06:35 +08:00
|
|
|
MachineFunction &MF = *MBB.getParent();
|
|
|
|
MachineFrameInfo &MFI = *MF.getFrameInfo();
|
2009-11-08 08:27:19 +08:00
|
|
|
unsigned Align = MFI.getObjectAlignment(FI);
|
2009-10-07 08:06:35 +08:00
|
|
|
MachineMemOperand *MMO =
|
2010-09-21 12:39:43 +08:00
|
|
|
MF.getMachineMemOperand(
|
2011-11-15 15:34:52 +08:00
|
|
|
MachinePointerInfo::getFixedStack(FI),
|
2010-09-21 12:39:43 +08:00
|
|
|
MachineMemOperand::MOLoad,
|
2009-10-07 08:06:35 +08:00
|
|
|
MFI.getObjectSize(FI),
|
2009-11-08 08:27:19 +08:00
|
|
|
Align);
|
2009-07-09 00:09:28 +08:00
|
|
|
|
2011-08-11 01:21:20 +08:00
|
|
|
switch (RC->getSize()) {
|
|
|
|
case 4:
|
|
|
|
if (ARM::GPRRegClass.hasSubClassEq(RC)) {
|
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
|
2010-10-27 06:37:02 +08:00
|
|
|
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
|
2011-08-11 01:21:20 +08:00
|
|
|
|
|
|
|
} else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
|
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRS), DestReg)
|
2010-05-06 09:34:11 +08:00
|
|
|
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
|
2011-08-11 01:21:20 +08:00
|
|
|
} else
|
|
|
|
llvm_unreachable("Unknown reg class!");
|
2010-06-19 05:32:42 +08:00
|
|
|
break;
|
2011-08-11 01:21:20 +08:00
|
|
|
case 8:
|
|
|
|
if (ARM::DPRRegClass.hasSubClassEq(RC)) {
|
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDRD), DestReg)
|
2009-10-07 08:06:35 +08:00
|
|
|
.addFrameIndex(FI).addImm(0).addMemOperand(MMO));
|
2012-10-27 05:29:15 +08:00
|
|
|
} else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
|
2013-04-21 19:57:07 +08:00
|
|
|
MachineInstrBuilder MIB;
|
|
|
|
|
|
|
|
if (Subtarget.hasV5TEOps()) {
|
|
|
|
MIB = BuildMI(MBB, I, DL, get(ARM::LDRD));
|
|
|
|
AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
|
|
|
|
AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
|
|
|
|
MIB.addFrameIndex(FI).addReg(0).addImm(0).addMemOperand(MMO);
|
|
|
|
|
|
|
|
AddDefaultPred(MIB);
|
|
|
|
} else {
|
|
|
|
// Fallback to LDM instruction, which has existed since the dawn of
|
|
|
|
// time.
|
|
|
|
MIB = AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDMIA))
|
|
|
|
.addFrameIndex(FI).addMemOperand(MMO));
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
|
|
|
|
}
|
|
|
|
|
2012-10-27 05:29:15 +08:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(DestReg))
|
|
|
|
MIB.addReg(DestReg, RegState::ImplicitDefine);
|
2011-08-11 01:21:20 +08:00
|
|
|
} else
|
|
|
|
llvm_unreachable("Unknown reg class!");
|
2010-06-19 05:32:42 +08:00
|
|
|
break;
|
2011-08-11 01:21:20 +08:00
|
|
|
case 16:
|
2012-03-29 05:20:32 +08:00
|
|
|
if (ARM::DPairRegClass.hasSubClassEq(RC)) {
|
2012-01-05 08:26:57 +08:00
|
|
|
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
|
2012-03-06 03:33:30 +08:00
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1q64), DestReg)
|
2010-07-07 05:26:18 +08:00
|
|
|
.addFrameIndex(FI).addImm(16)
|
2010-05-13 09:12:06 +08:00
|
|
|
.addMemOperand(MMO));
|
2011-08-11 01:21:20 +08:00
|
|
|
} else {
|
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMQIA), DestReg)
|
|
|
|
.addFrameIndex(FI)
|
|
|
|
.addMemOperand(MMO));
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
llvm_unreachable("Unknown reg class!");
|
2010-06-19 05:32:42 +08:00
|
|
|
break;
|
2012-08-04 21:16:12 +08:00
|
|
|
case 24:
|
|
|
|
if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
|
|
|
|
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
|
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64TPseudo), DestReg)
|
|
|
|
.addFrameIndex(FI).addImm(16)
|
|
|
|
.addMemOperand(MMO));
|
|
|
|
} else {
|
|
|
|
MachineInstrBuilder MIB =
|
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
|
|
|
|
.addFrameIndex(FI)
|
|
|
|
.addMemOperand(MMO));
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
|
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(DestReg))
|
|
|
|
MIB.addReg(DestReg, RegState::ImplicitDefine);
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
llvm_unreachable("Unknown reg class!");
|
|
|
|
break;
|
|
|
|
case 32:
|
|
|
|
if (ARM::QQPRRegClass.hasSubClassEq(RC) || ARM::DQuadRegClass.hasSubClassEq(RC)) {
|
2011-08-11 01:21:20 +08:00
|
|
|
if (Align >= 16 && getRegisterInfo().canRealignStack(MF)) {
|
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLD1d64QPseudo), DestReg)
|
2010-09-15 09:48:05 +08:00
|
|
|
.addFrameIndex(FI).addImm(16)
|
|
|
|
.addMemOperand(MMO));
|
2011-08-11 01:21:20 +08:00
|
|
|
} else {
|
|
|
|
MachineInstrBuilder MIB =
|
2010-11-16 09:16:36 +08:00
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
|
|
|
|
.addFrameIndex(FI))
|
2011-08-11 01:21:20 +08:00
|
|
|
.addMemOperand(MMO);
|
2012-03-05 02:40:30 +08:00
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
|
2012-03-06 10:48:17 +08:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(DestReg))
|
|
|
|
MIB.addReg(DestReg, RegState::ImplicitDefine);
|
2011-08-11 01:21:20 +08:00
|
|
|
}
|
|
|
|
} else
|
|
|
|
llvm_unreachable("Unknown reg class!");
|
2010-06-19 05:32:42 +08:00
|
|
|
break;
|
2011-08-11 01:21:20 +08:00
|
|
|
case 64:
|
|
|
|
if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
|
|
|
|
MachineInstrBuilder MIB =
|
2010-11-16 09:16:36 +08:00
|
|
|
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::VLDMDIA))
|
|
|
|
.addFrameIndex(FI))
|
2011-08-11 01:21:20 +08:00
|
|
|
.addMemOperand(MMO);
|
2012-03-05 02:40:30 +08:00
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_0, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_1, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_2, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_3, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_4, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_5, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_6, RegState::DefineNoRead, TRI);
|
|
|
|
MIB = AddDReg(MIB, DestReg, ARM::dsub_7, RegState::DefineNoRead, TRI);
|
2012-03-06 10:48:17 +08:00
|
|
|
if (TargetRegisterInfo::isPhysicalRegister(DestReg))
|
|
|
|
MIB.addReg(DestReg, RegState::ImplicitDefine);
|
2011-08-11 01:21:20 +08:00
|
|
|
} else
|
|
|
|
llvm_unreachable("Unknown reg class!");
|
2010-06-19 05:32:42 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unknown regclass!");
|
2009-07-09 00:09:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
unsigned
ARMBaseInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::LDRrs:
  case ARM::t2LDRs:  // FIXME: don't use t2LDRs to access frame.
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isReg() &&
        MI->getOperand(3).isImm() &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::LDRi12:
  case ARM::t2LDRi12:
  case ARM::tLDRspi:
  case ARM::VLDRD:
  case ARM::VLDRS:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(2).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLD1q64:
  case ARM::VLD1d64TPseudo:
  case ARM::VLD1d64QPseudo:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  case ARM::VLDMQIA:
    if (MI->getOperand(1).isFI() &&
        MI->getOperand(0).getSubReg() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }

  return 0;
}

unsigned ARMBaseInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                     int &FrameIndex) const {
  const MachineMemOperand *Dummy;
  return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
}

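// For illustration (register numbers chosen arbitrarily): a post-RA copy
// between even S-registers such as
//   %S0<def> = COPY %S2
// is widened by the hook below into
//   %D0<def> = VMOVD %D1<undef>, pred:14, pred:%noreg, %S2<imp-use>
// where the undef read of %D1 and the implicit use of %S2 tell the register
// scavenger and verifier which lane actually carries a defined value.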
bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
  // This hook gets to expand COPY instructions before they become
  // copyPhysReg() calls. Look for VMOVS instructions that can legally be
  // widened to VMOVD. We prefer the VMOVD when possible because it may be
  // changed into a VORR that can go down the NEON pipeline.
  if (!WidenVMOVS || !MI->isCopy() || Subtarget.isCortexA15())
    return false;

  // Look for a copy between even S-registers. That is where we keep floats
  // when using NEON v2f32 instructions for f32 arithmetic.
  unsigned DstRegS = MI->getOperand(0).getReg();
  unsigned SrcRegS = MI->getOperand(1).getReg();
  if (!ARM::SPRRegClass.contains(DstRegS, SrcRegS))
    return false;

  const TargetRegisterInfo *TRI = &getRegisterInfo();
  unsigned DstRegD = TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  unsigned SrcRegD = TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
                                              &ARM::DPRRegClass);
  if (!DstRegD || !SrcRegD)
    return false;

  // We want to widen this into a DstRegD = VMOVD SrcRegD copy. This is only
  // legal if the COPY already defines the full DstRegD, and it isn't a
  // sub-register insertion.
  if (!MI->definesRegister(DstRegD, TRI) || MI->readsRegister(DstRegD, TRI))
    return false;

  // A dead copy shouldn't show up here, but reject it just in case.
  if (MI->getOperand(0).isDead())
    return false;

  // All clear, widen the COPY.
  DEBUG(dbgs() << "widening: " << *MI);
  MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);

  // Get rid of the old <imp-def> of DstRegD. Leave it if it defines a Q-reg
  // or some other super-register.
  int ImpDefIdx = MI->findRegisterDefOperandIdx(DstRegD);
  if (ImpDefIdx != -1)
    MI->RemoveOperand(ImpDefIdx);

  // Change the opcode and operands.
  MI->setDesc(get(ARM::VMOVD));
  MI->getOperand(0).setReg(DstRegD);
  MI->getOperand(1).setReg(SrcRegD);
  AddDefaultPred(MIB);

  // We are now reading SrcRegD instead of SrcRegS. This may upset the
  // register scavenger and machine verifier, so we need to indicate that we
  // are reading an undefined value from SrcRegD, but a proper value from
  // SrcRegS.
  MI->getOperand(1).setIsUndef();
  MIB.addReg(SrcRegS, RegState::Implicit);

  // SrcRegD may actually contain an unrelated value in the ssub_1
  // sub-register. Don't kill it. Only kill the ssub_0 sub-register.
  if (MI->getOperand(1).isKill()) {
    MI->getOperand(1).setIsKill(false);
    MI->addRegisterKilled(SrcRegS, TRI, true);
  }

  DEBUG(dbgs() << "replaced by: " << *MI);
  return true;
}

/// Create a copy of a const pool value. Update CPI to the new index and return
/// the label UID.
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI) {
  MachineConstantPool *MCP = MF.getConstantPool();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();

  const MachineConstantPoolEntry &MCPE = MCP->getConstants()[CPI];
  assert(MCPE.isMachineConstantPoolEntry() &&
         "Expecting a machine constantpool entry!");
  ARMConstantPoolValue *ACPV =
    static_cast<ARMConstantPoolValue*>(MCPE.Val.MachineCPVal);

  unsigned PCLabelId = AFI->createPICLabelUId();
  ARMConstantPoolValue *NewCPV = 0;

  // FIXME: The below assumes PIC relocation model and that the function
  // is Thumb mode (t1 or t2). PCAdjustment would be 8 for ARM mode PIC, and
  // zero for non-PIC in ARM or Thumb. The callers are all of thumb LDR
  // instructions, so that's probably OK, but is PIC always correct when
  // we get here?
  if (ACPV->isGlobalValue())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
             ARMCP::CPValue, 4);
  else if (ACPV->isExtSymbol())
    NewCPV = ARMConstantPoolSymbol::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
  else if (ACPV->isBlockAddress())
    NewCPV = ARMConstantPoolConstant::
      Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
             ARMCP::CPBlockAddress, 4);
  else if (ACPV->isLSDA())
    NewCPV = ARMConstantPoolConstant::Create(MF.getFunction(), PCLabelId,
                                             ARMCP::CPLSDA, 4);
  else if (ACPV->isMachineBasicBlock())
    NewCPV = ARMConstantPoolMBB::
      Create(MF.getFunction()->getContext(),
             cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
  else
    llvm_unreachable("Unexpected ARM constantpool value type!!");
  CPI = MCP->getConstantPoolIndex(NewCPV, MCPE.getAlignment());
  return PCLabelId;
}

void ARMBaseInstrInfo::
reMaterialize(MachineBasicBlock &MBB,
              MachineBasicBlock::iterator I,
              unsigned DestReg, unsigned SubIdx,
              const MachineInstr *Orig,
              const TargetRegisterInfo &TRI) const {
  unsigned Opcode = Orig->getOpcode();
  switch (Opcode) {
  default: {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
    MBB.insert(I, MI);
    break;
  }
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    MachineFunction &MF = *MBB.getParent();
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    MachineInstrBuilder MIB = BuildMI(MBB, I, Orig->getDebugLoc(), get(Opcode),
                                      DestReg)
      .addConstantPoolIndex(CPI).addImm(PCLabelId);
    MIB->setMemRefs(Orig->memoperands_begin(), Orig->memoperands_end());
    break;
  }
  }
}

MachineInstr *
ARMBaseInstrInfo::duplicate(MachineInstr *Orig, MachineFunction &MF) const {
  MachineInstr *MI = TargetInstrInfo::duplicate(Orig, MF);
  switch(Orig->getOpcode()) {
  case ARM::tLDRpci_pic:
  case ARM::t2LDRpci_pic: {
    unsigned CPI = Orig->getOperand(1).getIndex();
    unsigned PCLabelId = duplicateCPV(MF, CPI);
    Orig->getOperand(1).setIndex(CPI);
    Orig->getOperand(2).setImm(PCLabelId);
    break;
  }
  }
  return MI;
}

bool ARMBaseInstrInfo::produceSameValue(const MachineInstr *MI0,
                                        const MachineInstr *MI1,
                                        const MachineRegisterInfo *MRI) const {
  int Opcode = MI0->getOpcode();
  if (Opcode == ARM::t2LDRpci ||
      Opcode == ARM::t2LDRpci_pic ||
      Opcode == ARM::tLDRpci ||
      Opcode == ARM::tLDRpci_pic ||
      Opcode == ARM::LDRLIT_ga_pcrel ||
      Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
      Opcode == ARM::tLDRLIT_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel ||
      Opcode == ARM::MOV_ga_pcrel_ldr ||
      Opcode == ARM::t2MOV_ga_pcrel) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    const MachineOperand &MO0 = MI0->getOperand(1);
    const MachineOperand &MO1 = MI1->getOperand(1);
    if (MO0.getOffset() != MO1.getOffset())
      return false;

    if (Opcode == ARM::LDRLIT_ga_pcrel ||
        Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
        Opcode == ARM::tLDRLIT_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel ||
        Opcode == ARM::MOV_ga_pcrel_ldr ||
        Opcode == ARM::t2MOV_ga_pcrel)
      // Ignore the PC labels.
      return MO0.getGlobal() == MO1.getGlobal();

    const MachineFunction *MF = MI0->getParent()->getParent();
    const MachineConstantPool *MCP = MF->getConstantPool();
    int CPI0 = MO0.getIndex();
    int CPI1 = MO1.getIndex();
    const MachineConstantPoolEntry &MCPE0 = MCP->getConstants()[CPI0];
    const MachineConstantPoolEntry &MCPE1 = MCP->getConstants()[CPI1];
    bool isARMCP0 = MCPE0.isMachineConstantPoolEntry();
    bool isARMCP1 = MCPE1.isMachineConstantPoolEntry();
    if (isARMCP0 && isARMCP1) {
      ARMConstantPoolValue *ACPV0 =
        static_cast<ARMConstantPoolValue*>(MCPE0.Val.MachineCPVal);
      ARMConstantPoolValue *ACPV1 =
        static_cast<ARMConstantPoolValue*>(MCPE1.Val.MachineCPVal);
      return ACPV0->hasSameValue(ACPV1);
    } else if (!isARMCP0 && !isARMCP1) {
      return MCPE0.Val.ConstVal == MCPE1.Val.ConstVal;
    }
    return false;
  } else if (Opcode == ARM::PICLDR) {
    if (MI1->getOpcode() != Opcode)
      return false;
    if (MI0->getNumOperands() != MI1->getNumOperands())
      return false;

    unsigned Addr0 = MI0->getOperand(1).getReg();
    unsigned Addr1 = MI1->getOperand(1).getReg();
    if (Addr0 != Addr1) {
      if (!MRI ||
          !TargetRegisterInfo::isVirtualRegister(Addr0) ||
          !TargetRegisterInfo::isVirtualRegister(Addr1))
        return false;

      // This assumes SSA form.
      MachineInstr *Def0 = MRI->getVRegDef(Addr0);
      MachineInstr *Def1 = MRI->getVRegDef(Addr1);
      // Check if the loaded values, e.g. a constantpool entry or a global
      // address, are the same.
      if (!produceSameValue(Def0, Def1, MRI))
        return false;
    }

    for (unsigned i = 3, e = MI0->getNumOperands(); i != e; ++i) {
      // %vreg12<def> = PICLDR %vreg11, 0, pred:14, pred:%noreg
      const MachineOperand &MO0 = MI0->getOperand(i);
      const MachineOperand &MO1 = MI1->getOperand(i);
      if (!MO0.isIdenticalTo(MO1))
        return false;
    }
    return true;
  }

  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

/// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
/// determine if two loads are loading from the same base address. It should
/// only return true if the base pointers are the same and the only difference
/// between the two addresses is the offset. It also returns the offsets by
/// reference.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                               int64_t &Offset1,
                                               int64_t &Offset2) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
    return false;

  switch (Load1->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRDi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  switch (Load2->getMachineOpcode()) {
  default:
    return false;
  case ARM::LDRi12:
  case ARM::LDRBi12:
  case ARM::LDRD:
  case ARM::LDRH:
  case ARM::LDRSB:
  case ARM::LDRSH:
  case ARM::VLDRD:
  case ARM::VLDRS:
  case ARM::t2LDRi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
    break;
  }

  // Check if base addresses and chain operands match.
  if (Load1->getOperand(0) != Load2->getOperand(0) ||
      Load1->getOperand(4) != Load2->getOperand(4))
    return false;

  // Index should be Reg0.
  if (Load1->getOperand(3) != Load2->getOperand(3))
    return false;

  // Determine the offsets.
  if (isa<ConstantSDNode>(Load1->getOperand(1)) &&
      isa<ConstantSDNode>(Load2->getOperand(1))) {
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(1))->getSExtValue();
    Offset2 = cast<ConstantSDNode>(Load2->getOperand(1))->getSExtValue();
    return true;
  }

  return false;
}

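// Note on the distance check below: (Offset2 - Offset1) / 8 > 64 rejects
// loads whose offsets are more than roughly 512 bytes apart. For example,
// offsets 0 and 1024 are never clustered, while offsets 0 and 256 may be.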
/// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should
/// be scheduled together. On some targets if two loads are loading from
/// addresses in the same cache line, it's better if they are scheduled
/// together. This function takes two integers that represent the load offsets
/// from the common base address. It returns true if it decides it's desirable
/// to schedule the two loads together. "NumLoads" is the number of loads that
/// have already been scheduled after Load1.
///
/// FIXME: remove this in favor of the MachineInstr interface once pre-RA-sched
/// is permanently disabled.
bool ARMBaseInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                               int64_t Offset1, int64_t Offset2,
                                               unsigned NumLoads) const {
  // Don't worry about Thumb: just ARM and Thumb2.
  if (Subtarget.isThumb1Only()) return false;

  assert(Offset2 > Offset1);

  if ((Offset2 - Offset1) / 8 > 64)
    return false;

  // Check if the machine opcodes are different. If they are different
  // then we consider them to not be of the same base address,
  // EXCEPT in the case of Thumb2 byte loads where one is LDRBi8 and the
  // other LDRBi12. In this case, they are considered to be the same because
  // they are different encoding forms of the same basic instruction.
  if ((Load1->getMachineOpcode() != Load2->getMachineOpcode()) &&
      !((Load1->getMachineOpcode() == ARM::t2LDRBi8 &&
         Load2->getMachineOpcode() == ARM::t2LDRBi12) ||
        (Load1->getMachineOpcode() == ARM::t2LDRBi12 &&
         Load2->getMachineOpcode() == ARM::t2LDRBi8)))
    return false; // FIXME: overly conservative?

  // Four loads in a row should be sufficient.
  if (NumLoads >= 3)
    return false;

  return true;
}

bool ARMBaseInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                            const MachineBasicBlock *MBB,
                                            const MachineFunction &MF) const {
  // Debug info is never a scheduling boundary. It's necessary to be explicit
  // due to the special treatment of IT instructions below, otherwise a
  // dbg_value followed by an IT will result in the IT instruction being
  // considered a scheduling hazard, which is wrong. It should be the actual
  // instruction preceding the dbg_value instruction(s), just like it is
  // when debug info is not present.
  if (MI->isDebugValue())
    return false;

  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Treat the start of the IT block as a scheduling boundary, but schedule
  // t2IT along with all instructions following it.
  // FIXME: This is a big hammer. But the alternative is to add all potential
  // true and anti dependencies to IT block instructions as implicit operands
  // to the t2IT instruction. The added compile time and complexity does not
  // seem worth it.
  MachineBasicBlock::const_iterator I = MI;
  // Make sure to skip any dbg_value instructions.
  while (++I != MBB->end() && I->isDebugValue())
    ;
  if (I != MBB->end() && I->getOpcode() == ARM::t2IT)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  // Calls don't actually change the stack pointer, even if they have imp-defs.
  // No ARM calling conventions change the stack pointer. (X86 calling
  // conventions sometimes do).
  if (!MI->isCall() && MI->definesRegister(ARM::SP))
    return true;

  return false;
}

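// Worked example for the cost model below (the misprediction penalty is
// subtarget-specific; 10 is assumed here purely for illustration): with a
// taken probability of 1/2, NumCycles = 8 and ExtraPredCycles = 1,
//   UnpredCost = 8 * 1/2 + 1 (the branch) + 10/10 = 6,
// and 8 + 1 = 9 > 6, so if-conversion is rejected; a 4-cycle block
// (4 + 1 = 5 <= 6) would be converted.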
bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  if (!NumCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned UnpredCost = Probability.getNumerator() * NumCycles;
  UnpredCost /= Probability.getDenominator();
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (NumCycles + ExtraPredCycles) <= UnpredCost;
}

bool ARMBaseInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned TCycles, unsigned TExtra,
                    MachineBasicBlock &FMBB,
                    unsigned FCycles, unsigned FExtra,
                    const BranchProbability &Probability) const {
  if (!TCycles || !FCycles)
    return false;

  // Attempt to estimate the relative costs of predication versus branching.
  unsigned TUnpredCost = Probability.getNumerator() * TCycles;
  TUnpredCost /= Probability.getDenominator();

  uint32_t Comp = Probability.getDenominator() - Probability.getNumerator();
  unsigned FUnpredCost = Comp * FCycles;
  FUnpredCost /= Probability.getDenominator();

  unsigned UnpredCost = TUnpredCost + FUnpredCost;
  UnpredCost += 1; // The branch itself
  UnpredCost += Subtarget.getMispredictionPenalty() / 10;

  return (TCycles + FCycles + TExtra + FExtra) <= UnpredCost;
}

bool
ARMBaseInstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                            MachineBasicBlock &FMBB) const {
  // Reduce false anti-dependencies to let Swift's out-of-order execution
  // engine do its thing.
  return Subtarget.isSwift();
}

/// getInstrPredicate - If instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes
llvm::getInstrPredicate(const MachineInstr *MI, unsigned &PredReg) {
  int PIdx = MI->findFirstPredOperandIdx();
  if (PIdx == -1) {
    PredReg = 0;
    return ARMCC::AL;
  }

  PredReg = MI->getOperand(PIdx+1).getReg();
  return (ARMCC::CondCodes)MI->getOperand(PIdx).getImm();
}

int llvm::getMatchingCondBranchOpcode(int Opc) {
  if (Opc == ARM::B)
    return ARM::Bcc;
  if (Opc == ARM::tB)
    return ARM::tBcc;
  if (Opc == ARM::t2B)
    return ARM::t2Bcc;

  llvm_unreachable("Unknown unconditional branch opcode!");
}

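// For illustration: commuting
//   %dst = MOVCCr %a, %b, pred:ARMCC::GE, pred:%CPSR
// swaps the two register uses and inverts the condition, yielding
//   %dst = MOVCCr %b, %a, pred:ARMCC::LT, pred:%CPSR
// which selects the same value.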
/// commuteInstruction - Handle commutable instructions.
MachineInstr *
ARMBaseInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
  switch (MI->getOpcode()) {
  case ARM::MOVCCr:
  case ARM::t2MOVCCr: {
    // MOVCC can be commuted by inverting the condition.
    unsigned PredReg = 0;
    ARMCC::CondCodes CC = getInstrPredicate(MI, PredReg);
    // MOVCC AL can't be inverted. Shouldn't happen.
    if (CC == ARMCC::AL || PredReg != ARM::CPSR)
      return NULL;
    MI = TargetInstrInfo::commuteInstruction(MI, NewMI);
    if (!MI)
      return NULL;
    // After swapping the MOVCC operands, also invert the condition.
    MI->getOperand(MI->findFirstPredOperandIdx())
      .setImm(ARMCC::getOppositeCondition(CC));
    return MI;
  }
  }
  return TargetInstrInfo::commuteInstruction(MI, NewMI);
}

/// Identify instructions that can be folded into a MOVCC instruction, and
/// return the defining instruction.
static MachineInstr *canFoldIntoMOVCC(unsigned Reg,
                                      const MachineRegisterInfo &MRI,
                                      const TargetInstrInfo *TII) {
  if (!TargetRegisterInfo::isVirtualRegister(Reg))
    return 0;
  if (!MRI.hasOneNonDBGUse(Reg))
    return 0;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  if (!MI)
    return 0;
  // MI is folded into the MOVCC by predicating it.
  if (!MI->isPredicable())
    return 0;
  // Check if MI has any non-dead defs or physreg uses. This also detects
  // predicated instructions which will be reading CPSR.
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    // Reject frame index operands, PEI can't handle the predicated pseudos.
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return 0;
    if (!MO.isReg())
      continue;
    // MI can't have any tied operands, that would conflict with predication.
    if (MO.isTied())
      return 0;
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      return 0;
    if (MO.isDef() && !MO.isDead())
      return 0;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(TII, /* AliasAnalysis = */ 0, DontMoveAcrossStores))
    return 0;
  return MI;
}

bool ARMBaseInstrInfo::analyzeSelect(const MachineInstr *MI,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     unsigned &TrueOp, unsigned &FalseOp,
                                     bool &Optimizable) const {
  assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  // MOVCC operands:
  // 0: Def.
  // 1: True use.
  // 2: False use.
  // 3: Condition code.
  // 4: CPSR use.
  TrueOp = 1;
  FalseOp = 2;
  Cond.push_back(MI->getOperand(3));
  Cond.push_back(MI->getOperand(4));
  // We can always fold a def.
  Optimizable = true;
  return false;
}

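// For illustration, in SSA form:
//   %vreg3<def> = SUBri %vreg1, -2147483647, pred:14, pred:%noreg, opt:%noreg
//   %vreg4<def,tied1> = MOVCCr %vreg3<tied0>, %vreg1, pred:12, pred:%CPSR
// optimizeSelect folds the SUBri into the select, producing a predicated
// SUBri with a tied imp-use that supplies the value for the false case:
//   SUBri %vreg1, -2147483647, pred:13, pred:%CPSR, opt:%noreg,
//         %vreg1<imp-use,tied0>
// Any instruction that is safe to move can be folded this way, so the *CC
// pseudo-instructions are not needed.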
MachineInstr *ARMBaseInstrInfo::optimizeSelect(MachineInstr *MI,
                                               bool PreferFalse) const {
  assert((MI->getOpcode() == ARM::MOVCCr || MI->getOpcode() == ARM::t2MOVCCr) &&
         "Unknown select instruction");
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = canFoldIntoMOVCC(MI->getOperand(2).getReg(), MRI, this);
  bool Invert = !DefMI;
  if (!DefMI)
    DefMI = canFoldIntoMOVCC(MI->getOperand(1).getReg(), MRI, this);
  if (!DefMI)
    return 0;

  // Find new register class to use.
  MachineOperand FalseReg = MI->getOperand(Invert ? 2 : 1);
  unsigned DestReg = MI->getOperand(0).getReg();
  const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return 0;

  // Create a new predicated version of DefMI.
  // Rfalse is the first use.
  MachineInstrBuilder NewMI = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                                      DefMI->getDesc(), DestReg);

  // Copy all the DefMI operands, excluding its (null) predicate.
  const MCInstrDesc &DefDesc = DefMI->getDesc();
  for (unsigned i = 1, e = DefDesc.getNumOperands();
       i != e && !DefDesc.OpInfo[i].isPredicate(); ++i)
    NewMI.addOperand(DefMI->getOperand(i));

  unsigned CondCode = MI->getOperand(3).getImm();
  if (Invert)
    NewMI.addImm(ARMCC::getOppositeCondition(ARMCC::CondCodes(CondCode)));
  else
    NewMI.addImm(CondCode);
  NewMI.addOperand(MI->getOperand(4));

  // DefMI is not the -S version that sets CPSR, so add an optional %noreg.
  if (NewMI->hasOptionalDef())
    AddDefaultCC(NewMI);

  // The output register value when the predicate is false is an implicit
  // register operand tied to the first def.
  // The tie makes the register allocator ensure the FalseReg is allocated the
  // same register as operand 0.
  FalseReg.setImplicit();
  NewMI.addOperand(FalseReg);
  NewMI->tieOperands(0, NewMI->getNumOperands() - 1);

  // The caller will erase MI, but not DefMI.
  DefMI->eraseFromParent();
  return NewMI;
}

/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether
/// the instruction is encoded with an 'S' bit is determined by the optional
/// CPSR def operand.
///
/// This will go away once we can teach tblgen how to set the optional CPSR def
/// operand itself.
struct AddSubFlagsOpcodePair {
  uint16_t PseudoOpc;
  uint16_t MachineOpc;
};

static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[] = {
  {ARM::ADDSri, ARM::ADDri},
  {ARM::ADDSrr, ARM::ADDrr},
  {ARM::ADDSrsi, ARM::ADDrsi},
  {ARM::ADDSrsr, ARM::ADDrsr},

  {ARM::SUBSri, ARM::SUBri},
  {ARM::SUBSrr, ARM::SUBrr},
  {ARM::SUBSrsi, ARM::SUBrsi},
  {ARM::SUBSrsr, ARM::SUBrsr},

  {ARM::RSBSri, ARM::RSBri},
  {ARM::RSBSrsi, ARM::RSBrsi},
  {ARM::RSBSrsr, ARM::RSBrsr},

  {ARM::t2ADDSri, ARM::t2ADDri},
  {ARM::t2ADDSrr, ARM::t2ADDrr},
  {ARM::t2ADDSrs, ARM::t2ADDrs},

  {ARM::t2SUBSri, ARM::t2SUBri},
  {ARM::t2SUBSrr, ARM::t2SUBrr},
  {ARM::t2SUBSrs, ARM::t2SUBrs},

  {ARM::t2RSBSri, ARM::t2RSBri},
  {ARM::t2RSBSrs, ARM::t2RSBrs},
};

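// For example, convertAddSubFlagsOpcode(ARM::t2ADDSri) returns ARM::t2ADDri;
// an opcode with no entry in the table above maps to 0.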
unsigned llvm::convertAddSubFlagsOpcode(unsigned OldOpc) {
  for (unsigned i = 0, e = array_lengthof(AddSubFlagsOpcodeMap); i != e; ++i)
    if (OldOpc == AddSubFlagsOpcodeMap[i].PseudoOpc)
      return AddSubFlagsOpcodeMap[i].MachineOpc;
  return 0;
}

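// Worked example for the splitting loop below: NumBytes = 0x1004 is not a
// legal ARM rotated immediate (its set bits span more than 8 bits), so it is
// emitted as two instructions, e.g.
//   add rD, rB, #0x4
//   add rD, rD, #0x1000
// each operand being an 8-bit value rotated right by an even amount.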
void llvm::emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                                   unsigned DestReg, unsigned BaseReg, int NumBytes,
                                   ARMCC::CondCodes Pred, unsigned PredReg,
                                   const ARMBaseInstrInfo &TII, unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::MOVr), DestReg)
      .addReg(BaseReg, RegState::Kill)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
      .setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  while (NumBytes) {
    unsigned RotAmt = ARM_AM::getSOImmValRotate(NumBytes);
    unsigned ThisVal = NumBytes & ARM_AM::rotr32(0xFF, RotAmt);
    assert(ThisVal && "Didn't extract field correctly");

    // We will handle these bits from offset, clear them.
    NumBytes &= ~ThisVal;

    assert(ARM_AM::getSOImmVal(ThisVal) != -1 && "Bit extraction didn't work?");

    // Build the new ADD / SUB.
    unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
    BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
      .addReg(BaseReg, RegState::Kill).addImm(ThisVal)
      .addImm((unsigned)Pred).addReg(PredReg).addReg(0)
      .setMIFlags(MIFlags);
    BaseReg = DestReg;
  }
}

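// For illustration (assuming r2 and r3 are dead and not callee-saved at this
// point): folding "add sp, sp, #8" into the epilogue
//   pop {r4, pc}
// yields
//   pop {r2, r3, r4, pc}
// where the two extra pops simply discard stack slots into dead registers.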
bool llvm::tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget,
                                      MachineFunction &MF, MachineInstr *MI,
                                      unsigned NumBytes) {
  // This optimisation potentially adds lots of load and store
  // micro-operations, it's only really a great benefit to code-size.
  if (!Subtarget.isMinSize())
    return false;

  // If only one register is pushed/popped, LLVM can use an LDR/STR
  // instead. We can't modify those so make sure we're dealing with an
  // instruction we understand.
  bool IsPop = isPopOpcode(MI->getOpcode());
  bool IsPush = isPushOpcode(MI->getOpcode());
  if (!IsPush && !IsPop)
    return false;

  bool IsVFPPushPop = MI->getOpcode() == ARM::VSTMDDB_UPD ||
                      MI->getOpcode() == ARM::VLDMDIA_UPD;
  bool IsT1PushPop = MI->getOpcode() == ARM::tPUSH ||
                     MI->getOpcode() == ARM::tPOP ||
                     MI->getOpcode() == ARM::tPOP_RET;

  assert((IsT1PushPop || (MI->getOperand(0).getReg() == ARM::SP &&
                          MI->getOperand(1).getReg() == ARM::SP)) &&
         "trying to fold sp update into non-sp-updating push/pop");

  // The VFP push & pop act on D-registers, so we can only correctly fold an
  // adjustment that is a multiple of 8 bytes. Similarly, rN registers are
  // 4 bytes. Don't try if this is violated.
  if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
    return false;

  // ARM and Thumb2 push/pop insts have explicit "sp, sp" operands (+
  // pred) so the list starts at 4. Thumb1 starts after the predicate.
  int RegListIdx = IsT1PushPop ? 2 : 4;

  // Calculate the space we'll need in terms of registers.
  unsigned FirstReg = MI->getOperand(RegListIdx).getReg();
  unsigned RD0Reg, RegsNeeded;
  if (IsVFPPushPop) {
    RD0Reg = ARM::D0;
    RegsNeeded = NumBytes / 8;
  } else {
    RD0Reg = ARM::R0;
    RegsNeeded = NumBytes / 4;
  }

  // We're going to have to strip all list operands off before
  // re-adding them since the order matters, so save the existing ones
  // for later.
  SmallVector<MachineOperand, 4> RegList;
  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
    RegList.push_back(MI->getOperand(i));

  MachineBasicBlock *MBB = MI->getParent();
  const TargetRegisterInfo *TRI = MF.getRegInfo().getTargetRegisterInfo();
  const MCPhysReg *CSRegs = TRI->getCalleeSavedRegs(&MF);

  // Now try to find enough space in the reglist to allocate NumBytes.
  for (unsigned CurReg = FirstReg - 1; CurReg >= RD0Reg && RegsNeeded;
       --CurReg) {
    if (!IsPop) {
      // Pushing any register is completely harmless, mark the
      // register involved as undef since we don't care about it in
      // the slightest.
      RegList.push_back(MachineOperand::CreateReg(CurReg, false, false,
                                                  false, false, true));
      --RegsNeeded;
      continue;
    }

    // However, we can only pop an extra register if it's not live. For
    // registers live within the function we might clobber a return value
    // register; the other way a register can be live here is if it's
    // callee-saved.
    if (isCalleeSavedRegister(CurReg, CSRegs) ||
        MBB->computeRegisterLiveness(TRI, CurReg, MI) !=
          MachineBasicBlock::LQR_Dead) {
      // VFP pops don't allow holes in the register list, so any skip is fatal
      // for our transformation. GPR pops do, so we should just keep looking.
      if (IsVFPPushPop)
        return false;
      else
        continue;
    }

    // Mark the unimportant registers as <def,dead> in the POP.
    RegList.push_back(MachineOperand::CreateReg(CurReg, true, false, false,
                                                true));
    --RegsNeeded;
  }

  if (RegsNeeded > 0)
    return false;

  // Finally we know we can profitably perform the optimisation so go
  // ahead: strip all existing registers off and add them back again
  // in the right order.
  for (int i = MI->getNumOperands() - 1; i >= RegListIdx; --i)
    MI->RemoveOperand(i);

  // Add the complete list back in.
  MachineInstrBuilder MIB(MF, &*MI);
  for (int i = RegList.size() - 1; i >= 0; --i)
    MIB.addOperand(RegList[i]);

  return true;
}

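// For illustration: if an ADDri of a frame index resolves to a total offset
// of 0x1004, the function below keeps the encodable chunk #0x4 in the
// instruction, returns false, and leaves Offset = 0x1000 for the caller to
// materialize separately.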
bool llvm::rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                                unsigned FrameReg, int &Offset,
                                const ARMBaseInstrInfo &TII) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  // Memory operands in inline assembly always use AddrMode2.
  if (Opcode == ARM::INLINEASM)
    AddrMode = ARMII::AddrMode2;

  if (Opcode == ARM::ADDri) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    if (Offset == 0) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::MOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.RemoveOperand(FrameRegIdx+1);
      Offset = 0;
      return true;
    } else if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(TII.get(ARM::SUBri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getSOImmVal(Offset) != -1) {
      // Replace the FrameIndex with sp / fp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      Offset = 0;
      return true;
    }

    // Otherwise, pull as much of the immediate into this ADDri/SUBri
    // as possible.
    unsigned RotAmt = ARM_AM::getSOImmValRotate(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xFF, RotAmt);

    // We will handle these bits from offset, clear them.
    Offset &= ~ThisImmVal;

    // Get the properly encoded SOImmVal field.
    assert(ARM_AM::getSOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
  } else {
    unsigned ImmIdx = 0;
    int InstrOffs = 0;
    unsigned NumBits = 0;
    unsigned Scale = 1;
    switch (AddrMode) {
    case ARMII::AddrMode_i12: {
      ImmIdx = FrameRegIdx + 1;
      InstrOffs = MI.getOperand(ImmIdx).getImm();
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode2: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM2Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM2Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 12;
      break;
    }
    case ARMII::AddrMode3: {
      ImmIdx = FrameRegIdx+2;
      InstrOffs = ARM_AM::getAM3Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM3Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      break;
    }
    case ARMII::AddrMode4:
    case ARMII::AddrMode6:
      // Can't fold any offset even if it's zero.
      return false;
    case ARMII::AddrMode5: {
      ImmIdx = FrameRegIdx+1;
      InstrOffs = ARM_AM::getAM5Offset(MI.getOperand(ImmIdx).getImm());
      if (ARM_AM::getAM5Op(MI.getOperand(ImmIdx).getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      break;
    }
    default:
      llvm_unreachable("Unsupported addressing mode!");
    }

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
    }

    // Attempt to fold the address computation if the opcode has offset bits.
    if (NumBits > 0) {
      // Common case: small offset, fits into instruction.
      MachineOperand &ImmOp = MI.getOperand(ImmIdx);
      int ImmedOffset = Offset / Scale;
      unsigned Mask = (1 << NumBits) - 1;
      if ((unsigned)Offset <= Mask * Scale) {
        // Replace the FrameIndex with sp
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        // FIXME: When addrmode2 goes away, this will simplify (like the
        // T2 version), as the LDR.i12 versions don't need the encoding
        // tricks for the offset value.
        if (isSub) {
          if (AddrMode == ARMII::AddrMode_i12)
            ImmedOffset = -ImmedOffset;
          else
            ImmedOffset |= 1 << NumBits;
        }
        ImmOp.ChangeToImmediate(ImmedOffset);
        Offset = 0;
        return true;
      }

      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      if (isSub) {
        if (AddrMode == ARMII::AddrMode_i12)
          ImmedOffset = -ImmedOffset;
        else
          ImmedOffset |= 1 << NumBits;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask*Scale);
    }
  }

  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0;
}

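// For example, analyzeCompare on "CMPri %r0, 42" yields SrcReg = r0,
// SrcReg2 = 0, CmpMask = ~0, CmpValue = 42; on "CMPrr %r0, %r1" it yields
// SrcReg = r0, SrcReg2 = r1, CmpValue = 0.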
/// analyzeCompare - For a comparison instruction, return the source registers
/// in SrcReg and SrcReg2 (if it has two register operands), and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
bool ARMBaseInstrInfo::
analyzeCompare(const MachineInstr *MI, unsigned &SrcReg, unsigned &SrcReg2,
               int &CmpMask, int &CmpValue) const {
  switch (MI->getOpcode()) {
  default: break;
  case ARM::CMPri:
  case ARM::t2CMPri:
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = 0;
    CmpMask = ~0;
    CmpValue = MI->getOperand(1).getImm();
    return true;
  case ARM::CMPrr:
  case ARM::t2CMPrr:
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = MI->getOperand(1).getReg();
    CmpMask = ~0;
    CmpValue = 0;
    return true;
  case ARM::TSTri:
  case ARM::t2TSTri:
    SrcReg = MI->getOperand(0).getReg();
    SrcReg2 = 0;
    CmpMask = MI->getOperand(1).getImm();
    CmpValue = 0;
    return true;
  }

  return false;
}

/// isSuitableForMask - Identify a suitable 'and' instruction that
/// operates on the given source register and applies the same mask
/// as a 'tst' instruction. Provide a limited look-through for copies.
/// When successful, MI will hold the found instruction.
static bool isSuitableForMask(MachineInstr *&MI, unsigned SrcReg,
                              int CmpMask, bool CommonUse) {
  switch (MI->getOpcode()) {
    case ARM::ANDri:
    case ARM::t2ANDri:
      if (CmpMask != MI->getOperand(2).getImm())
        return false;
      if (SrcReg == MI->getOperand(CommonUse ? 1 : 0).getReg())
        return true;
      break;
    case ARM::COPY: {
      // Walk down one instruction which is potentially an 'and'.
      const MachineInstr &Copy = *MI;
      MachineBasicBlock::iterator AND(
        std::next(MachineBasicBlock::iterator(MI)));
      if (AND == MI->getParent()->end()) return false;
      MI = AND;
      return isSuitableForMask(MI, Copy.getOperand(0).getReg(),
                               CmpMask, true);
    }
  }

  return false;
}

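// For illustration: if "CMP r0, r1; BGE ..." is rewritten to use the flags
// set by "SUBS rX, r1, r0" (operands swapped), the branch condition must
// become LE, which is what getSwappedCondition(ARMCC::GE) returns.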
/// getSwappedCondition - Assume the flags are set by MI(a,b); return
/// the condition code if we modify the instructions such that flags are
/// set by MI(b,a).
inline static ARMCC::CondCodes getSwappedCondition(ARMCC::CondCodes CC) {
  switch (CC) {
  default: return ARMCC::AL;
  case ARMCC::EQ: return ARMCC::EQ;
  case ARMCC::NE: return ARMCC::NE;
  case ARMCC::HS: return ARMCC::LS;
  case ARMCC::LO: return ARMCC::HI;
  case ARMCC::HI: return ARMCC::LO;
  case ARMCC::LS: return ARMCC::HS;
  case ARMCC::GE: return ARMCC::LE;
  case ARMCC::LT: return ARMCC::GT;
  case ARMCC::GT: return ARMCC::LT;
  case ARMCC::LE: return ARMCC::GE;
  }
}

/// isRedundantFlagInstr - Check whether the first instruction, whose only
/// purpose is to update flags, can be made redundant.
/// CMPrr can be made redundant by SUBrr if the operands are the same.
/// CMPri can be made redundant by SUBri if the operands are the same.
/// This function can be extended later on.
inline static bool isRedundantFlagInstr(MachineInstr *CmpI, unsigned SrcReg,
                                        unsigned SrcReg2, int ImmValue,
                                        MachineInstr *OI) {
  if ((CmpI->getOpcode() == ARM::CMPrr ||
       CmpI->getOpcode() == ARM::t2CMPrr) &&
      (OI->getOpcode() == ARM::SUBrr ||
       OI->getOpcode() == ARM::t2SUBrr) &&
      ((OI->getOperand(1).getReg() == SrcReg &&
        OI->getOperand(2).getReg() == SrcReg2) ||
       (OI->getOperand(1).getReg() == SrcReg2 &&
        OI->getOperand(2).getReg() == SrcReg)))
    return true;

  if ((CmpI->getOpcode() == ARM::CMPri ||
       CmpI->getOpcode() == ARM::t2CMPri) &&
      (OI->getOpcode() == ARM::SUBri ||
       OI->getOpcode() == ARM::t2SUBri) &&
      OI->getOperand(1).getReg() == SrcReg &&
      OI->getOperand(2).getImm() == ImmValue)
    return true;
  return false;
}

2012-06-30 05:33:59 +08:00
|
|
|
/// optimizeCompareInstr - Convert the instruction supplying the argument to the
|
|
|
|
/// comparison into one that sets the zero bit in the flags register;
|
|
|
|
/// Remove a redundant Compare instruction if an earlier instruction can set the
|
|
|
|
/// flags in the same way as Compare.
|
|
|
|
/// E.g. SUBrr(r1,r2) and CMPrr(r1,r2). We also handle the case where two
|
|
|
|
/// operands are swapped: SUBrr(r1,r2) and CMPrr(r2,r1), by updating the
|
|
|
|
/// condition code of instructions which use the flags.
|
2010-08-06 09:32:48 +08:00
|
|
|
bool ARMBaseInstrInfo::
|
2012-06-30 05:33:59 +08:00
|
|
|
optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
|
|
|
|
int CmpMask, int CmpValue,
|
|
|
|
const MachineRegisterInfo *MRI) const {
|
2012-06-30 06:06:19 +08:00
|
|
|
// Get the unique definition of SrcReg.
|
|
|
|
MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
|
|
|
|
if (!MI) return false;
|
2010-09-11 07:34:19 +08:00
|
|
|
|
2010-09-21 20:01:15 +08:00
|
|
|
// Masked compares sometimes use the same register as the corresponding 'and'.
|
|
|
|
if (CmpMask != ~0) {
|
2012-09-11 03:17:25 +08:00
|
|
|
if (!isSuitableForMask(MI, SrcReg, CmpMask, false) || isPredicated(MI)) {
|
2010-09-21 20:01:15 +08:00
|
|
|
MI = 0;
|
2010-10-19 05:22:31 +08:00
|
|
|
for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(SrcReg),
|
|
|
|
UE = MRI->use_end(); UI != UE; ++UI) {
|
2010-09-21 20:01:15 +08:00
|
|
|
if (UI->getParent() != CmpInstr->getParent()) continue;
|
2010-09-29 18:12:08 +08:00
|
|
|
MachineInstr *PotentialAND = &*UI;
|
2012-09-11 03:17:25 +08:00
|
|
|
if (!isSuitableForMask(PotentialAND, SrcReg, CmpMask, true) ||
|
|
|
|
isPredicated(PotentialAND))
|
2010-09-21 20:01:15 +08:00
|
|
|
continue;
|
2010-09-29 18:12:08 +08:00
|
|
|
MI = PotentialAND;
|
2010-09-21 20:01:15 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (!MI) return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-05-11 09:30:47 +08:00
|
|
|
// Get ready to iterate backward from CmpInstr.
|
|
|
|
MachineBasicBlock::iterator I = CmpInstr, E = MI,
|
|
|
|
B = CmpInstr->getParent()->begin();
|
2010-10-09 08:03:48 +08:00
|
|
|
|
|
|
|
// Early exit if CmpInstr is at the beginning of the BB.
|
|
|
|
if (I == B) return false;
|
|
|
|
|
2012-05-11 09:30:47 +08:00
|
|
|
// There are two possible candidates which can be changed to set CPSR:
|
|
|
|
// One is MI, the other is a SUB instruction.
|
|
|
|
// For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
|
|
|
|
// For CMPri(r1, CmpValue), we are looking for SUBri(r1, CmpValue).
|
|
|
|
MachineInstr *Sub = NULL;
|
2012-06-30 05:33:59 +08:00
|
|
|
if (SrcReg2 != 0)
|
2012-05-11 09:30:47 +08:00
|
|
|
// MI is not a candidate for CMPrr.
|
|
|
|
MI = NULL;
|
2012-06-30 05:33:59 +08:00
|
|
|
else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) {
|
2012-05-11 09:30:47 +08:00
|
|
|
// Conservatively refuse to convert an instruction which isn't in the same
|
|
|
|
// BB as the comparison.
|
|
|
|
// For CMPri, we need to check Sub, so we can't return here.
|
2012-05-11 23:36:46 +08:00
|
|
|
if (CmpInstr->getOpcode() == ARM::CMPri ||
|
2012-05-11 09:30:47 +08:00
|
|
|
CmpInstr->getOpcode() == ARM::t2CMPri)
|
|
|
|
MI = NULL;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that CPSR isn't set between the comparison instruction and the one we
|
|
|
|
// want to change. At the same time, search for Sub.
|
2012-06-30 06:06:19 +08:00
|
|
|
const TargetRegisterInfo *TRI = &getRegisterInfo();
|
2010-08-06 09:32:48 +08:00
|
|
|
--I;
|
|
|
|
for (; I != E; --I) {
|
|
|
|
const MachineInstr &Instr = *I;
|
|
|
|
|
2012-06-30 06:06:19 +08:00
|
|
|
if (Instr.modifiesRegister(ARM::CPSR, TRI) ||
|
|
|
|
Instr.readsRegister(ARM::CPSR, TRI))
|
// Reads of CPSR matter here, not just defs.  For example:
//   (1) subs ...
//   (2) sub   r6, r5, r4
//   (3) movge ...
//   (4) cmp   r6, 0
//   (5) movge ...
// We cannot convert (2) to "subs" because (3) is using the CPSR set by
// (1).  There is an analogous situation here:
//   (1) sub r1, r2, r3
//   (2) sub r4, r5, r6
//   (3) cmp r4, ...
//   (5) movge ...
//   (6) cmp r1, ...
//   (7) movge ...
// We cannot convert (1) to "subs" because of the intervening use of CPSR.
2010-11-02 04:41:43 +08:00
|
|
|
// This instruction modifies or uses CPSR after the one we want to
|
|
|
|
// change. We can't do this transformation.
|
2012-06-30 06:06:19 +08:00
|
|
|
return false;
|
2012-05-11 09:30:47 +08:00
|
|
|
|
2012-06-30 06:06:19 +08:00
|
|
|
// Check whether CmpInstr can be made redundant by the current instruction.
|
|
|
|
if (isRedundantFlagInstr(CmpInstr, SrcReg, SrcReg2, CmpValue, &*I)) {
|
2012-05-11 09:30:47 +08:00
|
|
|
Sub = &*I;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-09-22 07:49:07 +08:00
|
|
|
if (I == B)
|
|
|
|
// The 'and' is below the comparison instruction.
|
|
|
|
return false;
|
2010-08-06 09:32:48 +08:00
|
|
|
}
|
|
|
|
|
2012-05-11 09:30:47 +08:00
|
|
|
// Return false if no candidates exist.
|
|
|
|
if (!MI && !Sub)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// The single candidate is called MI.
|
|
|
|
if (!MI) MI = Sub;
|
|
|
|
|
2012-09-11 03:17:25 +08:00
|
|
|
// We can't use a predicated instruction - it doesn't always write the flags.
|
|
|
|
if (isPredicated(MI))
|
|
|
|
return false;
|
|
|
|
|
2010-08-06 09:32:48 +08:00
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default: break;
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::RSBrr:
|
2011-04-07 07:35:59 +08:00
|
|
|
case ARM::RSBri:
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::RSCrr:
|
2011-04-07 07:35:59 +08:00
|
|
|
case ARM::RSCri:
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::ADDrr:
|
2010-08-11 08:23:00 +08:00
|
|
|
case ARM::ADDri:
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::ADCrr:
|
2011-04-07 07:35:59 +08:00
|
|
|
case ARM::ADCri:
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::SUBrr:
|
2010-08-11 08:23:00 +08:00
|
|
|
case ARM::SUBri:
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::SBCrr:
|
2011-04-07 07:35:59 +08:00
|
|
|
case ARM::SBCri:
|
|
|
|
case ARM::t2RSBri:
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::t2ADDrr:
|
2010-08-11 08:23:00 +08:00
|
|
|
case ARM::t2ADDri:
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::t2ADCrr:
|
2011-04-07 07:35:59 +08:00
|
|
|
case ARM::t2ADCri:
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::t2SUBrr:
|
2011-04-07 07:35:59 +08:00
|
|
|
case ARM::t2SUBri:
|
2011-04-16 04:28:28 +08:00
|
|
|
case ARM::t2SBCrr:
|
2011-04-16 04:45:00 +08:00
|
|
|
case ARM::t2SBCri:
|
|
|
|
case ARM::ANDrr:
|
|
|
|
case ARM::ANDri:
|
|
|
|
case ARM::t2ANDrr:
|
2011-04-16 05:24:38 +08:00
|
|
|
case ARM::t2ANDri:
|
|
|
|
case ARM::ORRrr:
|
|
|
|
case ARM::ORRri:
|
|
|
|
case ARM::t2ORRrr:
|
|
|
|
case ARM::t2ORRri:
|
|
|
|
case ARM::EORrr:
|
|
|
|
case ARM::EORri:
|
|
|
|
case ARM::t2EORrr:
|
|
|
|
case ARM::t2EORri: {
|
2012-05-11 09:30:47 +08:00
|
|
|
// Scan forward for the use of CPSR
|
|
|
|
// When checking against MI: if it uses a condition code that requires
|
2012-07-12 06:51:44 +08:00
|
|
|
// checking the V bit, then this is not safe to do.
|
|
|
|
// It is safe to remove CmpInstr if CPSR is redefined or killed.
|
|
|
|
// If we are done with the basic block, we need to check whether CPSR is
|
|
|
|
// live-out.
|
2012-06-30 06:06:19 +08:00
|
|
|
SmallVector<std::pair<MachineOperand*, ARMCC::CondCodes>, 4>
|
|
|
|
OperandsToUpdate;
|
2011-03-24 06:52:04 +08:00
|
|
|
bool isSafe = false;
|
|
|
|
I = CmpInstr;
|
2012-05-11 09:30:47 +08:00
|
|
|
E = CmpInstr->getParent()->end();
|
2011-03-24 06:52:04 +08:00
|
|
|
while (!isSafe && ++I != E) {
|
|
|
|
const MachineInstr &Instr = *I;
|
|
|
|
for (unsigned IO = 0, EO = Instr.getNumOperands();
|
|
|
|
!isSafe && IO != EO; ++IO) {
|
|
|
|
const MachineOperand &MO = Instr.getOperand(IO);
|
2012-02-18 03:23:15 +08:00
|
|
|
if (MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR)) {
|
|
|
|
isSafe = true;
|
|
|
|
break;
|
|
|
|
}
|
2011-03-24 06:52:04 +08:00
|
|
|
if (!MO.isReg() || MO.getReg() != ARM::CPSR)
|
|
|
|
continue;
|
|
|
|
if (MO.isDef()) {
|
|
|
|
isSafe = true;
|
|
|
|
break;
|
|
|
|
}
|
2013-12-07 01:56:48 +08:00
|
|
|
// The condition code operand is the one just before CPSR, except for VSELs.
|
|
|
|
ARMCC::CondCodes CC;
|
|
|
|
bool IsInstrVSel = true;
|
|
|
|
switch (Instr.getOpcode()) {
|
|
|
|
default:
|
|
|
|
IsInstrVSel = false;
|
|
|
|
CC = (ARMCC::CondCodes)Instr.getOperand(IO - 1).getImm();
|
|
|
|
break;
|
|
|
|
case ARM::VSELEQD:
|
|
|
|
case ARM::VSELEQS:
|
|
|
|
CC = ARMCC::EQ;
|
|
|
|
break;
|
|
|
|
case ARM::VSELGTD:
|
|
|
|
case ARM::VSELGTS:
|
|
|
|
CC = ARMCC::GT;
|
|
|
|
break;
|
|
|
|
case ARM::VSELGED:
|
|
|
|
case ARM::VSELGES:
|
|
|
|
CC = ARMCC::GE;
|
|
|
|
break;
|
|
|
|
case ARM::VSELVSS:
|
|
|
|
case ARM::VSELVSD:
|
|
|
|
CC = ARMCC::VS;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2012-06-30 06:06:19 +08:00
|
|
|
if (Sub) {
|
|
|
|
ARMCC::CondCodes NewCC = getSwappedCondition(CC);
|
|
|
|
if (NewCC == ARMCC::AL)
|
2012-05-11 09:30:47 +08:00
|
|
|
return false;
|
2012-06-30 06:06:19 +08:00
|
|
|
// If we have SUB(r1, r2) and CMP(r2, r1), the condition code based
|
|
|
|
// on CMP needs to be updated to be based on SUB.
|
|
|
|
// Push the condition code operands to OperandsToUpdate.
|
|
|
|
// If it is safe to remove CmpInstr, the condition code of these
|
|
|
|
// operands will be modified.
|
|
|
|
if (SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
|
2013-12-07 01:56:48 +08:00
|
|
|
Sub->getOperand(2).getReg() == SrcReg) {
|
|
|
|
// VSel doesn't support condition code update.
|
|
|
|
if (IsInstrVSel)
|
|
|
|
return false;
|
|
|
|
OperandsToUpdate.push_back(
|
|
|
|
std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
|
|
|
|
}
|
|
|
|
} else
|
2012-05-11 09:30:47 +08:00
|
|
|
switch (CC) {
|
|
|
|
default:
|
2012-07-12 07:47:00 +08:00
|
|
|
// CPSR can be used multiple times; we should continue.
|
2012-05-11 09:30:47 +08:00
|
|
|
break;
|
|
|
|
case ARMCC::VS:
|
|
|
|
case ARMCC::VC:
|
|
|
|
case ARMCC::GE:
|
|
|
|
case ARMCC::LT:
|
|
|
|
case ARMCC::GT:
|
|
|
|
case ARMCC::LE:
|
|
|
|
return false;
|
|
|
|
}
|
2011-03-24 06:52:04 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-07-12 06:51:44 +08:00
|
|
|
// If CPSR is not killed nor re-defined, we should check whether it is
|
|
|
|
// live-out. If it is live-out, do not optimize.
|
|
|
|
if (!isSafe) {
|
|
|
|
MachineBasicBlock *MBB = CmpInstr->getParent();
|
|
|
|
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
|
|
|
|
SE = MBB->succ_end(); SI != SE; ++SI)
|
|
|
|
if ((*SI)->isLiveIn(ARM::CPSR))
|
|
|
|
return false;
|
|
|
|
}
|
2011-03-24 06:52:04 +08:00
|
|
|
|
2010-11-17 16:06:50 +08:00
|
|
|
// Toggle the optional operand to CPSR.
|
|
|
|
MI->getOperand(5).setReg(ARM::CPSR);
|
|
|
|
MI->getOperand(5).setIsDef(true);
|
2012-09-11 03:17:25 +08:00
|
|
|
assert(!isPredicated(MI) && "Can't use flags from predicated instruction");
|
2010-08-06 09:32:48 +08:00
|
|
|
CmpInstr->eraseFromParent();
|
2012-05-11 09:30:47 +08:00
|
|
|
|
|
|
|
// Modify the condition code of operands in OperandsToUpdate.
|
|
|
|
// Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
|
|
|
|
// be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
|
2012-06-30 06:06:19 +08:00
|
|
|
for (unsigned i = 0, e = OperandsToUpdate.size(); i < e; i++)
|
|
|
|
OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
|
2010-08-06 09:32:48 +08:00
|
|
|
return true;
|
|
|
|
}
|
2011-04-16 04:45:00 +08:00
|
|
|
}
|
2010-08-06 09:32:48 +08:00
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
2010-09-10 02:18:55 +08:00
|
|
|
|
2010-11-18 04:13:28 +08:00
|
|
|
bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,
|
|
|
|
MachineInstr *DefMI, unsigned Reg,
|
|
|
|
MachineRegisterInfo *MRI) const {
|
|
|
|
// Fold large immediates into add, sub, or, xor.
|
|
|
|
unsigned DefOpc = DefMI->getOpcode();
|
|
|
|
if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm)
|
|
|
|
return false;
|
|
|
|
if (!DefMI->getOperand(1).isImm())
|
|
|
|
// Could be t2MOVi32imm <ga:xx>
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!MRI->hasOneNonDBGUse(Reg))
|
|
|
|
return false;
|
|
|
|
|
// This implements a peephole optimization which looks for a def / use
// pair.  The def produces a 32-bit immediate which is consumed by the use.
// It tries to fold the immediate by breaking it into two parts and folding
// them into the immediate fields of two instructions, e.g.:
//   movw r2, #40885
//   movt r3, #46540
//   add  r0, r0, r3
// =>
//   add.w r0, r0, #3019898880
//   add.w r0, r0, #30146560
// However, this transformation is incorrect if the use produces a flag,
// e.g.:
//   movw r2, #40885
//   movt r3, #46540
//   adds r0, r0, r3
// =>
//   add.w  r0, r0, #3019898880
//   adds.w r0, r0, #30146560
// Note the adds.w may not set the carry flag even if the original sequence
// would.  Hence the optional-def (CPSR) checks below.
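// (Worked numbers for the example above: 46540 == 0xB5CC, so the folded
// constant is 0xB5CC0000 == 0xB4000000 + 0x01CC0000, i.e. 3019898880 +
// 30146560, each half encodable as a rotated 8-bit immediate.)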
2012-03-27 07:31:00 +08:00
|
|
|
const MCInstrDesc &DefMCID = DefMI->getDesc();
|
|
|
|
if (DefMCID.hasOptionalDef()) {
|
|
|
|
unsigned NumOps = DefMCID.getNumOperands();
|
|
|
|
const MachineOperand &MO = DefMI->getOperand(NumOps-1);
|
|
|
|
if (MO.getReg() == ARM::CPSR && !MO.isDead())
|
|
|
|
// If DefMI defines CPSR and it is not dead, it's obviously not safe
|
|
|
|
// to delete DefMI.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
const MCInstrDesc &UseMCID = UseMI->getDesc();
|
|
|
|
if (UseMCID.hasOptionalDef()) {
|
|
|
|
unsigned NumOps = UseMCID.getNumOperands();
|
|
|
|
if (UseMI->getOperand(NumOps-1).getReg() == ARM::CPSR)
|
|
|
|
// If the instruction sets the flag, do not attempt this optimization
|
|
|
|
// since it may change the semantics of the code.
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-11-18 04:13:28 +08:00
|
|
|
unsigned UseOpc = UseMI->getOpcode();
|
2010-11-18 09:43:23 +08:00
|
|
|
unsigned NewUseOpc = 0;
|
2010-11-18 04:13:28 +08:00
|
|
|
uint32_t ImmVal = (uint32_t)DefMI->getOperand(1).getImm();
|
2010-11-18 09:43:23 +08:00
|
|
|
uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
|
2010-11-18 04:13:28 +08:00
|
|
|
bool Commute = false;
|
|
|
|
switch (UseOpc) {
|
|
|
|
default: return false;
|
|
|
|
case ARM::SUBrr:
|
|
|
|
case ARM::ADDrr:
|
|
|
|
case ARM::ORRrr:
|
|
|
|
case ARM::EORrr:
|
|
|
|
case ARM::t2SUBrr:
|
|
|
|
case ARM::t2ADDrr:
|
|
|
|
case ARM::t2ORRrr:
|
|
|
|
case ARM::t2EORrr: {
|
|
|
|
Commute = UseMI->getOperand(2).getReg() != Reg;
|
|
|
|
switch (UseOpc) {
|
|
|
|
default: break;
|
|
|
|
case ARM::SUBrr: {
|
|
|
|
if (Commute)
|
|
|
|
return false;
|
|
|
|
ImmVal = -ImmVal;
|
|
|
|
NewUseOpc = ARM::SUBri;
|
|
|
|
// Fallthrough
|
|
|
|
}
|
|
|
|
case ARM::ADDrr:
|
|
|
|
case ARM::ORRrr:
|
|
|
|
case ARM::EORrr: {
|
|
|
|
if (!ARM_AM::isSOImmTwoPartVal(ImmVal))
|
|
|
|
return false;
|
|
|
|
SOImmValV1 = (uint32_t)ARM_AM::getSOImmTwoPartFirst(ImmVal);
|
|
|
|
SOImmValV2 = (uint32_t)ARM_AM::getSOImmTwoPartSecond(ImmVal);
|
|
|
|
switch (UseOpc) {
|
|
|
|
default: break;
|
|
|
|
case ARM::ADDrr: NewUseOpc = ARM::ADDri; break;
|
|
|
|
case ARM::ORRrr: NewUseOpc = ARM::ORRri; break;
|
|
|
|
case ARM::EORrr: NewUseOpc = ARM::EORri; break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case ARM::t2SUBrr: {
|
|
|
|
if (Commute)
|
|
|
|
return false;
|
|
|
|
ImmVal = -ImmVal;
|
|
|
|
NewUseOpc = ARM::t2SUBri;
|
|
|
|
// Fallthrough
|
|
|
|
}
|
|
|
|
case ARM::t2ADDrr:
|
|
|
|
case ARM::t2ORRrr:
|
|
|
|
case ARM::t2EORrr: {
|
|
|
|
if (!ARM_AM::isT2SOImmTwoPartVal(ImmVal))
|
|
|
|
return false;
|
|
|
|
SOImmValV1 = (uint32_t)ARM_AM::getT2SOImmTwoPartFirst(ImmVal);
|
|
|
|
SOImmValV2 = (uint32_t)ARM_AM::getT2SOImmTwoPartSecond(ImmVal);
|
|
|
|
switch (UseOpc) {
|
|
|
|
default: break;
|
|
|
|
case ARM::t2ADDrr: NewUseOpc = ARM::t2ADDri; break;
|
|
|
|
case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri; break;
|
|
|
|
case ARM::t2EORrr: NewUseOpc = ARM::t2EORri; break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned OpIdx = Commute ? 2 : 1;
|
|
|
|
unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
|
|
|
|
bool isKill = UseMI->getOperand(OpIdx).isKill();
|
|
|
|
unsigned NewReg = MRI->createVirtualRegister(MRI->getRegClass(Reg));
|
|
|
|
AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
|
2011-12-14 10:11:42 +08:00
|
|
|
UseMI, UseMI->getDebugLoc(),
|
2010-11-18 04:13:28 +08:00
|
|
|
get(NewUseOpc), NewReg)
|
|
|
|
.addReg(Reg1, getKillRegState(isKill))
|
|
|
|
.addImm(SOImmValV1)));
|
|
|
|
UseMI->setDesc(get(NewUseOpc));
|
|
|
|
UseMI->getOperand(1).setReg(NewReg);
|
|
|
|
UseMI->getOperand(1).setIsKill();
|
|
|
|
UseMI->getOperand(2).ChangeToImmediate(SOImmValV2);
|
|
|
|
DefMI->eraseFromParent();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-09-30 05:43:49 +08:00
|
|
|
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr *MI) {
|
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default: {
|
|
|
|
const MCInstrDesc &Desc = MI->getDesc();
|
|
|
|
int UOps = ItinData->getNumMicroOps(Desc.getSchedClass());
|
|
|
|
assert(UOps >= 0 && "bad # UOps");
|
|
|
|
return UOps;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::LDRrs:
|
|
|
|
case ARM::LDRBrs:
|
|
|
|
case ARM::STRrs:
|
|
|
|
case ARM::STRBrs: {
|
|
|
|
unsigned ShOpVal = MI->getOperand(3).getImm();
|
|
|
|
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (!isSub &&
|
|
|
|
(ShImm == 0 ||
|
|
|
|
((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
|
|
|
|
ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
|
|
|
|
return 1;
|
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::LDRH:
|
|
|
|
case ARM::STRH: {
|
|
|
|
if (!MI->getOperand(2).getReg())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
unsigned ShOpVal = MI->getOperand(3).getImm();
|
|
|
|
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (!isSub &&
|
|
|
|
(ShImm == 0 ||
|
|
|
|
((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
|
|
|
|
ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
|
|
|
|
return 1;
|
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::LDRSB:
|
|
|
|
case ARM::LDRSH:
|
|
|
|
return (ARM_AM::getAM3Op(MI->getOperand(3).getImm()) == ARM_AM::sub) ? 3:2;
|
|
|
|
|
|
|
|
case ARM::LDRSB_POST:
|
|
|
|
case ARM::LDRSH_POST: {
|
|
|
|
unsigned Rt = MI->getOperand(0).getReg();
|
|
|
|
unsigned Rm = MI->getOperand(3).getReg();
|
|
|
|
return (Rt == Rm) ? 4 : 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::LDR_PRE_REG:
|
|
|
|
case ARM::LDRB_PRE_REG: {
|
|
|
|
unsigned Rt = MI->getOperand(0).getReg();
|
|
|
|
unsigned Rm = MI->getOperand(3).getReg();
|
|
|
|
if (Rt == Rm)
|
|
|
|
return 3;
|
|
|
|
unsigned ShOpVal = MI->getOperand(4).getImm();
|
|
|
|
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (!isSub &&
|
|
|
|
(ShImm == 0 ||
|
|
|
|
((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
|
|
|
|
ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
|
|
|
|
return 2;
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::STR_PRE_REG:
|
|
|
|
case ARM::STRB_PRE_REG: {
|
|
|
|
unsigned ShOpVal = MI->getOperand(4).getImm();
|
|
|
|
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (!isSub &&
|
|
|
|
(ShImm == 0 ||
|
|
|
|
((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
|
|
|
|
ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
|
|
|
|
return 2;
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::LDRH_PRE:
|
|
|
|
case ARM::STRH_PRE: {
|
|
|
|
unsigned Rt = MI->getOperand(0).getReg();
|
|
|
|
unsigned Rm = MI->getOperand(3).getReg();
|
|
|
|
if (!Rm)
|
|
|
|
return 2;
|
|
|
|
if (Rt == Rm)
|
|
|
|
return 3;
|
|
|
|
return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub)
|
|
|
|
? 3 : 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::LDR_POST_REG:
|
|
|
|
case ARM::LDRB_POST_REG:
|
|
|
|
case ARM::LDRH_POST: {
|
|
|
|
unsigned Rt = MI->getOperand(0).getReg();
|
|
|
|
unsigned Rm = MI->getOperand(3).getReg();
|
|
|
|
return (Rt == Rm) ? 3 : 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::LDR_PRE_IMM:
|
|
|
|
case ARM::LDRB_PRE_IMM:
|
|
|
|
case ARM::LDR_POST_IMM:
|
|
|
|
case ARM::LDRB_POST_IMM:
|
|
|
|
case ARM::STRB_POST_IMM:
|
|
|
|
case ARM::STRB_POST_REG:
|
|
|
|
case ARM::STRB_PRE_IMM:
|
|
|
|
case ARM::STRH_POST:
|
|
|
|
case ARM::STR_POST_IMM:
|
|
|
|
case ARM::STR_POST_REG:
|
|
|
|
case ARM::STR_PRE_IMM:
|
|
|
|
return 2;
|
|
|
|
|
|
|
|
case ARM::LDRSB_PRE:
|
|
|
|
case ARM::LDRSH_PRE: {
|
|
|
|
unsigned Rm = MI->getOperand(3).getReg();
|
|
|
|
if (Rm == 0)
|
|
|
|
return 3;
|
|
|
|
unsigned Rt = MI->getOperand(0).getReg();
|
|
|
|
if (Rt == Rm)
|
|
|
|
return 4;
|
|
|
|
unsigned ShOpVal = MI->getOperand(4).getImm();
|
|
|
|
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (!isSub &&
|
|
|
|
(ShImm == 0 ||
|
|
|
|
((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
|
|
|
|
ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
|
|
|
|
return 3;
|
|
|
|
return 4;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::LDRD: {
|
|
|
|
unsigned Rt = MI->getOperand(0).getReg();
|
|
|
|
unsigned Rn = MI->getOperand(2).getReg();
|
|
|
|
unsigned Rm = MI->getOperand(3).getReg();
|
|
|
|
if (Rm)
|
|
|
|
return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3;
|
|
|
|
return (Rt == Rn) ? 3 : 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::STRD: {
|
|
|
|
unsigned Rm = MI->getOperand(3).getReg();
|
|
|
|
if (Rm)
|
|
|
|
return (ARM_AM::getAM3Op(MI->getOperand(4).getImm()) == ARM_AM::sub) ?4:3;
|
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::LDRD_POST:
|
|
|
|
case ARM::t2LDRD_POST:
|
|
|
|
return 3;
|
|
|
|
|
|
|
|
case ARM::STRD_POST:
|
|
|
|
case ARM::t2STRD_POST:
|
|
|
|
return 4;
|
|
|
|
|
|
|
|
case ARM::LDRD_PRE: {
|
|
|
|
unsigned Rt = MI->getOperand(0).getReg();
|
|
|
|
unsigned Rn = MI->getOperand(3).getReg();
|
|
|
|
unsigned Rm = MI->getOperand(4).getReg();
|
|
|
|
if (Rm)
|
|
|
|
return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4;
|
|
|
|
return (Rt == Rn) ? 4 : 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::t2LDRD_PRE: {
|
|
|
|
unsigned Rt = MI->getOperand(0).getReg();
|
|
|
|
unsigned Rn = MI->getOperand(3).getReg();
|
|
|
|
return (Rt == Rn) ? 4 : 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::STRD_PRE: {
|
|
|
|
unsigned Rm = MI->getOperand(4).getReg();
|
|
|
|
if (Rm)
|
|
|
|
return (ARM_AM::getAM3Op(MI->getOperand(5).getImm()) == ARM_AM::sub) ?5:4;
|
|
|
|
return 3;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::t2STRD_PRE:
|
|
|
|
return 3;
|
|
|
|
|
|
|
|
case ARM::t2LDR_POST:
|
|
|
|
case ARM::t2LDRB_POST:
|
|
|
|
case ARM::t2LDRB_PRE:
|
|
|
|
case ARM::t2LDRSBi12:
|
|
|
|
case ARM::t2LDRSBi8:
|
|
|
|
case ARM::t2LDRSBpci:
|
|
|
|
case ARM::t2LDRSBs:
|
|
|
|
case ARM::t2LDRH_POST:
|
|
|
|
case ARM::t2LDRH_PRE:
|
|
|
|
case ARM::t2LDRSBT:
|
|
|
|
case ARM::t2LDRSB_POST:
|
|
|
|
case ARM::t2LDRSB_PRE:
|
|
|
|
case ARM::t2LDRSH_POST:
|
|
|
|
case ARM::t2LDRSH_PRE:
|
|
|
|
case ARM::t2LDRSHi12:
|
|
|
|
case ARM::t2LDRSHi8:
|
|
|
|
case ARM::t2LDRSHpci:
|
|
|
|
case ARM::t2LDRSHs:
|
|
|
|
return 2;
|
|
|
|
|
|
|
|
case ARM::t2LDRDi8: {
|
|
|
|
unsigned Rt = MI->getOperand(0).getReg();
|
|
|
|
unsigned Rn = MI->getOperand(2).getReg();
|
|
|
|
return (Rt == Rn) ? 3 : 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
case ARM::t2STRB_POST:
|
|
|
|
case ARM::t2STRB_PRE:
|
|
|
|
case ARM::t2STRBs:
|
|
|
|
case ARM::t2STRDi8:
|
|
|
|
case ARM::t2STRH_POST:
|
|
|
|
case ARM::t2STRH_PRE:
|
|
|
|
case ARM::t2STRHs:
|
|
|
|
case ARM::t2STR_POST:
|
|
|
|
case ARM::t2STR_PRE:
|
|
|
|
case ARM::t2STRs:
|
|
|
|
return 2;
|
|
|
|
}
|
|
|
|
}
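// Illustrative summary: on Swift an LDRrs such as "ldr r0, [r1, r2]" or
// "ldr r0, [r1, r2, lsl #2]" is a single uop above, while subtracted or
// otherwise-shifted offsets cost two.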
|
|
|
|
|
2012-09-15 02:48:46 +08:00
|
|
|
// Return the number of 32-bit words loaded by LDM or stored by STM. If this
|
|
|
|
// can't be easily determined, return 0 (missing MachineMemOperand).
|
|
|
|
//
|
|
|
|
// FIXME: The current MachineInstr design does not support relying on machine
|
|
|
|
// mem operands to determine the width of a memory access. Instead, we expect
|
|
|
|
// the target to provide this information based on the instruction opcode and
|
|
|
|
// operands. However, using MachineMemOperand is the best solution for now, for
|
|
|
|
// two reasons:
|
|
|
|
//
|
|
|
|
// 1) getNumMicroOps tries to infer LDM memory width from the total number of MI
|
|
|
|
// operands. This is much more dangerous than using the MachineMemOperand
|
|
|
|
// sizes because CodeGen passes can insert/remove optional machine operands. In
|
|
|
|
// fact, it's totally incorrect for preRA passes and appears to be wrong for
|
|
|
|
// postRA passes as well.
|
|
|
|
//
|
|
|
|
// 2) getNumLDMAddresses is only used by the scheduling machine model and any
|
|
|
|
// machine model that calls this should handle the unknown (zero size) case.
|
|
|
|
//
|
|
|
|
// Long term, we should require a target hook that verifies MachineMemOperand
|
|
|
|
// sizes during MC lowering. That target hook should be local to MC lowering
|
|
|
|
// because we can't ensure that it is aware of other MI forms. Doing this will
|
|
|
|
// ensure that MachineMemOperands are correctly propagated through all passes.
|
|
|
|
unsigned ARMBaseInstrInfo::getNumLDMAddresses(const MachineInstr *MI) const {
|
|
|
|
unsigned Size = 0;
|
|
|
|
for (MachineInstr::mmo_iterator I = MI->memoperands_begin(),
|
|
|
|
E = MI->memoperands_end(); I != E; ++I) {
|
|
|
|
Size += (*I)->getSize();
|
|
|
|
}
|
|
|
|
return Size / 4;
|
|
|
|
}
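// Illustrative example: if an LDMIA of four GPRs carries four 4-byte
// MachineMemOperands, this returns 16 / 4 == 4; with no memoperands it
// returns 0 and the machine model must treat the width as unknown.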
|
|
|
|
|
2010-09-10 02:18:55 +08:00
|
|
|
unsigned
|
2010-11-03 08:45:17 +08:00
|
|
|
ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr *MI) const {
|
2010-09-10 09:29:16 +08:00
|
|
|
if (!ItinData || ItinData->isEmpty())
|
2010-09-10 02:18:55 +08:00
|
|
|
return 1;
|
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &Desc = MI->getDesc();
|
2010-09-10 02:18:55 +08:00
|
|
|
unsigned Class = Desc.getSchedClass();
|
2012-07-03 02:10:42 +08:00
|
|
|
int ItinUOps = ItinData->getNumMicroOps(Class);
|
2012-09-30 05:43:49 +08:00
|
|
|
if (ItinUOps >= 0) {
|
|
|
|
if (Subtarget.isSwift() && (Desc.mayLoad() || Desc.mayStore()))
|
|
|
|
return getNumMicroOpsSwiftLdSt(ItinData, MI);
|
|
|
|
|
2012-07-03 02:10:42 +08:00
|
|
|
return ItinUOps;
|
2012-09-30 05:43:49 +08:00
|
|
|
}
|
2010-09-10 02:18:55 +08:00
|
|
|
|
|
|
|
unsigned Opc = MI->getOpcode();
|
|
|
|
switch (Opc) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("Unexpected multi-uops instruction!");
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::VLDMQIA:
|
|
|
|
case ARM::VSTMQIA:
|
2010-09-10 02:18:55 +08:00
|
|
|
return 2;
|
|
|
|
|
|
|
|
// The number of uOps for load / store multiple is determined by the number of
|
|
|
|
// registers.
|
2010-12-24 12:28:06 +08:00
|
|
|
//
|
2010-09-10 09:29:16 +08:00
|
|
|
// On Cortex-A8, each pair of register loads / stores can be scheduled on the
|
|
|
|
// same cycle. The scheduling for the first load / store must be done
|
2012-07-23 16:51:15 +08:00
|
|
|
// separately by assuming the address is not 64-bit aligned.
|
2010-11-16 09:16:36 +08:00
|
|
|
//
|
2010-09-10 09:29:16 +08:00
|
|
|
// On Cortex-A9, the formula is simply (#reg / 2) + (#reg % 2). If the address
|
2010-11-16 09:16:36 +08:00
|
|
|
// is not 64-bit aligned, then the AGU takes an extra cycle. For VFP / NEON
|
|
|
|
// load / store multiple, the formula is (#reg / 2) + (#reg % 2) + 1.
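// E.g. a VLDM of 5 D registers takes (5 / 2) + (5 % 2) + 1 = 4 uOps.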
|
|
|
|
case ARM::VLDMDIA:
|
|
|
|
case ARM::VLDMDIA_UPD:
|
|
|
|
case ARM::VLDMDDB_UPD:
|
|
|
|
case ARM::VLDMSIA:
|
|
|
|
case ARM::VLDMSIA_UPD:
|
|
|
|
case ARM::VLDMSDB_UPD:
|
|
|
|
case ARM::VSTMDIA:
|
|
|
|
case ARM::VSTMDIA_UPD:
|
|
|
|
case ARM::VSTMDDB_UPD:
|
|
|
|
case ARM::VSTMSIA:
|
|
|
|
case ARM::VSTMSIA_UPD:
|
|
|
|
case ARM::VSTMSDB_UPD: {
|
2010-09-10 02:18:55 +08:00
|
|
|
unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands();
|
|
|
|
return (NumRegs / 2) + (NumRegs % 2) + 1;
|
|
|
|
}
|
2010-11-16 09:16:36 +08:00
|
|
|
|
|
|
|
case ARM::LDMIA_RET:
|
|
|
|
case ARM::LDMIA:
|
|
|
|
case ARM::LDMDA:
|
|
|
|
case ARM::LDMDB:
|
|
|
|
case ARM::LDMIB:
|
|
|
|
case ARM::LDMIA_UPD:
|
|
|
|
case ARM::LDMDA_UPD:
|
|
|
|
case ARM::LDMDB_UPD:
|
|
|
|
case ARM::LDMIB_UPD:
|
|
|
|
case ARM::STMIA:
|
|
|
|
case ARM::STMDA:
|
|
|
|
case ARM::STMDB:
|
|
|
|
case ARM::STMIB:
|
|
|
|
case ARM::STMIA_UPD:
|
|
|
|
case ARM::STMDA_UPD:
|
|
|
|
case ARM::STMDB_UPD:
|
|
|
|
case ARM::STMIB_UPD:
|
|
|
|
case ARM::tLDMIA:
|
|
|
|
case ARM::tLDMIA_UPD:
|
|
|
|
case ARM::tSTMIA_UPD:
|
2010-09-10 02:18:55 +08:00
|
|
|
case ARM::tPOP_RET:
|
|
|
|
case ARM::tPOP:
|
|
|
|
case ARM::tPUSH:
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::t2LDMIA_RET:
|
|
|
|
case ARM::t2LDMIA:
|
|
|
|
case ARM::t2LDMDB:
|
|
|
|
case ARM::t2LDMIA_UPD:
|
|
|
|
case ARM::t2LDMDB_UPD:
|
|
|
|
case ARM::t2STMIA:
|
|
|
|
case ARM::t2STMDB:
|
|
|
|
case ARM::t2STMIA_UPD:
|
|
|
|
case ARM::t2STMDB_UPD: {
|
2010-09-10 09:29:16 +08:00
|
|
|
unsigned NumRegs = MI->getNumOperands() - Desc.getNumOperands() + 1;
|
2012-09-30 05:43:49 +08:00
|
|
|
if (Subtarget.isSwift()) {
|
|
|
|
int UOps = 1 + NumRegs; // One for address computation, one for each ld / st.
|
|
|
|
switch (Opc) {
|
|
|
|
default: break;
|
|
|
|
case ARM::VLDMDIA_UPD:
|
|
|
|
case ARM::VLDMDDB_UPD:
|
|
|
|
case ARM::VLDMSIA_UPD:
|
|
|
|
case ARM::VLDMSDB_UPD:
|
|
|
|
case ARM::VSTMDIA_UPD:
|
|
|
|
case ARM::VSTMDDB_UPD:
|
|
|
|
case ARM::VSTMSIA_UPD:
|
|
|
|
case ARM::VSTMSDB_UPD:
|
|
|
|
case ARM::LDMIA_UPD:
|
|
|
|
case ARM::LDMDA_UPD:
|
|
|
|
case ARM::LDMDB_UPD:
|
|
|
|
case ARM::LDMIB_UPD:
|
|
|
|
case ARM::STMIA_UPD:
|
|
|
|
case ARM::STMDA_UPD:
|
|
|
|
case ARM::STMDB_UPD:
|
|
|
|
case ARM::STMIB_UPD:
|
|
|
|
case ARM::tLDMIA_UPD:
|
|
|
|
case ARM::tSTMIA_UPD:
|
|
|
|
case ARM::t2LDMIA_UPD:
|
|
|
|
case ARM::t2LDMDB_UPD:
|
|
|
|
case ARM::t2STMIA_UPD:
|
|
|
|
case ARM::t2STMDB_UPD:
|
|
|
|
++UOps; // One for base register writeback.
|
|
|
|
break;
|
|
|
|
case ARM::LDMIA_RET:
|
|
|
|
case ARM::tPOP_RET:
|
|
|
|
case ARM::t2LDMIA_RET:
|
|
|
|
UOps += 2; // One for base reg wb, one for write to pc.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return UOps;
|
|
|
|
} else if (Subtarget.isCortexA8()) {
|
2010-11-03 08:45:17 +08:00
|
|
|
if (NumRegs < 4)
|
|
|
|
return 2;
|
|
|
|
// 4 registers would be issued: 2, 2.
|
|
|
|
// 5 registers would be issued: 2, 2, 1.
|
2012-07-03 02:10:42 +08:00
|
|
|
int A8UOps = (NumRegs / 2);
|
2010-11-03 08:45:17 +08:00
|
|
|
if (NumRegs % 2)
|
2012-07-03 02:10:42 +08:00
|
|
|
++A8UOps;
|
|
|
|
return A8UOps;
|
2012-09-30 05:43:49 +08:00
|
|
|
} else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
|
2012-07-03 02:10:42 +08:00
|
|
|
int A9UOps = (NumRegs / 2);
|
2010-09-10 09:29:16 +08:00
|
|
|
// If there is an odd number of registers or the address is not 64-bit aligned,
|
|
|
|
// then it takes an extra AGU (Address Generation Unit) cycle.
|
|
|
|
if ((NumRegs % 2) ||
|
|
|
|
!MI->hasOneMemOperand() ||
|
|
|
|
(*MI->memoperands_begin())->getAlignment() < 8)
|
2012-07-03 02:10:42 +08:00
|
|
|
++A9UOps;
|
|
|
|
return A9UOps;
|
2010-09-10 09:29:16 +08:00
|
|
|
} else {
|
|
|
|
// Assume the worst.
|
|
|
|
return NumRegs;
|
2010-10-05 14:00:33 +08:00
|
|
|
}
|
2010-09-10 02:18:55 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2010-10-06 14:27:31 +08:00
|
|
|
|
2010-10-08 07:12:15 +08:00
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getVLDMDefCycle(const InstrItineraryData *ItinData,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &DefMCID,
|
2010-10-08 07:12:15 +08:00
|
|
|
unsigned DefClass,
|
|
|
|
unsigned DefIdx, unsigned DefAlign) const {
|
2011-06-29 03:10:37 +08:00
|
|
|
int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
|
2010-10-08 07:12:15 +08:00
|
|
|
if (RegNo <= 0)
|
|
|
|
// Def is the address writeback.
|
|
|
|
return ItinData->getOperandCycle(DefClass, DefIdx);
|
|
|
|
|
|
|
|
int DefCycle;
|
|
|
|
if (Subtarget.isCortexA8()) {
|
|
|
|
// (regno / 2) + (regno % 2) + 1
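// e.g. RegNo == 5: 5 / 2 + 1 == 3, plus one for the odd register == 4.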
|
|
|
|
DefCycle = RegNo / 2 + 1;
|
|
|
|
if (RegNo % 2)
|
|
|
|
++DefCycle;
|
2012-09-30 05:43:49 +08:00
|
|
|
} else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
|
2010-10-08 07:12:15 +08:00
|
|
|
DefCycle = RegNo;
|
|
|
|
bool isSLoad = false;
|
2010-11-16 09:16:36 +08:00
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
switch (DefMCID.getOpcode()) {
|
2010-10-08 07:12:15 +08:00
|
|
|
default: break;
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::VLDMSIA:
|
|
|
|
case ARM::VLDMSIA_UPD:
|
|
|
|
case ARM::VLDMSDB_UPD:
|
2010-10-08 07:12:15 +08:00
|
|
|
isSLoad = true;
|
|
|
|
break;
|
|
|
|
}
|
2010-11-16 09:16:36 +08:00
|
|
|
|
2010-10-08 07:12:15 +08:00
|
|
|
// If there is an odd number of 'S' registers or the address is not 64-bit
|
|
|
|
// then it takes an extra cycle.
|
|
|
|
if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
|
|
|
|
++DefCycle;
|
|
|
|
} else {
|
|
|
|
// Assume the worst.
|
|
|
|
DefCycle = RegNo + 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return DefCycle;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getLDMDefCycle(const InstrItineraryData *ItinData,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &DefMCID,
|
2010-10-08 07:12:15 +08:00
|
|
|
unsigned DefClass,
|
|
|
|
unsigned DefIdx, unsigned DefAlign) const {
|
2011-06-29 03:10:37 +08:00
|
|
|
int RegNo = (int)(DefIdx+1) - DefMCID.getNumOperands() + 1;
|
2010-10-08 07:12:15 +08:00
|
|
|
if (RegNo <= 0)
|
|
|
|
// Def is the address writeback.
|
|
|
|
return ItinData->getOperandCycle(DefClass, DefIdx);
|
|
|
|
|
|
|
|
int DefCycle;
|
|
|
|
if (Subtarget.isCortexA8()) {
|
|
|
|
// 4 registers would be issued: 1, 2, 1.
|
|
|
|
// 5 registers would be issued: 1, 2, 2.
|
|
|
|
DefCycle = RegNo / 2;
|
|
|
|
if (DefCycle < 1)
|
|
|
|
DefCycle = 1;
|
|
|
|
// Result latency is issue cycle + 2: E2.
|
|
|
|
DefCycle += 2;
|
2012-09-30 05:43:49 +08:00
|
|
|
} else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
|
2010-10-08 07:12:15 +08:00
|
|
|
DefCycle = (RegNo / 2);
|
|
|
|
// If there is an odd number of registers or the address is not 64-bit aligned,
|
|
|
|
// then it takes an extra AGU (Address Generation Unit) cycle.
|
|
|
|
if ((RegNo % 2) || DefAlign < 8)
|
|
|
|
++DefCycle;
|
|
|
|
// Result latency is AGU cycles + 2.
|
|
|
|
DefCycle += 2;
|
|
|
|
} else {
|
|
|
|
// Assume the worst.
|
|
|
|
DefCycle = RegNo + 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return DefCycle;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getVSTMUseCycle(const InstrItineraryData *ItinData,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &UseMCID,
|
2010-10-08 07:12:15 +08:00
|
|
|
unsigned UseClass,
|
|
|
|
unsigned UseIdx, unsigned UseAlign) const {
|
2011-06-29 03:10:37 +08:00
|
|
|
int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
|
2010-10-08 07:12:15 +08:00
|
|
|
if (RegNo <= 0)
|
|
|
|
return ItinData->getOperandCycle(UseClass, UseIdx);
|
|
|
|
|
|
|
|
int UseCycle;
|
|
|
|
if (Subtarget.isCortexA8()) {
|
|
|
|
// (regno / 2) + (regno % 2) + 1
|
|
|
|
UseCycle = RegNo / 2 + 1;
|
|
|
|
if (RegNo % 2)
|
|
|
|
++UseCycle;
|
2012-09-30 05:43:49 +08:00
|
|
|
} else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
|
2010-10-08 07:12:15 +08:00
|
|
|
UseCycle = RegNo;
|
|
|
|
bool isSStore = false;
|
2010-11-16 09:16:36 +08:00
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
switch (UseMCID.getOpcode()) {
|
2010-10-08 07:12:15 +08:00
|
|
|
default: break;
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::VSTMSIA:
|
|
|
|
case ARM::VSTMSIA_UPD:
|
|
|
|
case ARM::VSTMSDB_UPD:
|
2010-10-08 07:12:15 +08:00
|
|
|
isSStore = true;
|
|
|
|
break;
|
|
|
|
}
|
2010-11-16 09:16:36 +08:00
|
|
|
|
2010-10-08 07:12:15 +08:00
|
|
|
// If there is an odd number of 'S' registers or the address is not 64-bit
|
|
|
|
// then it takes an extra cycle.
|
|
|
|
if ((isSStore && (RegNo % 2)) || UseAlign < 8)
|
|
|
|
++UseCycle;
|
|
|
|
} else {
|
|
|
|
// Assume the worst.
|
|
|
|
UseCycle = RegNo + 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
return UseCycle;
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getSTMUseCycle(const InstrItineraryData *ItinData,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &UseMCID,
|
2010-10-08 07:12:15 +08:00
|
|
|
unsigned UseClass,
|
|
|
|
unsigned UseIdx, unsigned UseAlign) const {
|
2011-06-29 03:10:37 +08:00
|
|
|
int RegNo = (int)(UseIdx+1) - UseMCID.getNumOperands() + 1;
|
2010-10-08 07:12:15 +08:00
|
|
|
if (RegNo <= 0)
|
|
|
|
return ItinData->getOperandCycle(UseClass, UseIdx);
|
|
|
|
|
|
|
|
int UseCycle;
|
|
|
|
if (Subtarget.isCortexA8()) {
|
|
|
|
UseCycle = RegNo / 2;
|
|
|
|
if (UseCycle < 2)
|
|
|
|
UseCycle = 2;
|
|
|
|
// Read in E3.
|
|
|
|
UseCycle += 2;
|
2012-09-30 05:43:49 +08:00
|
|
|
} else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
|
2010-10-08 07:12:15 +08:00
|
|
|
UseCycle = (RegNo / 2);
|
|
|
|
// If there is an odd number of registers or the address is not 64-bit aligned,
|
|
|
|
// then it takes an extra AGU (Address Generation Unit) cycle.
|
|
|
|
if ((RegNo % 2) || UseAlign < 8)
|
|
|
|
++UseCycle;
|
|
|
|
} else {
|
|
|
|
// Assume the worst.
|
|
|
|
UseCycle = 1;
|
|
|
|
}
|
|
|
|
return UseCycle;
|
|
|
|
}
|
|
|
|
|
2010-10-06 14:27:31 +08:00
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &DefMCID,
|
2010-10-06 14:27:31 +08:00
|
|
|
unsigned DefIdx, unsigned DefAlign,
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &UseMCID,
|
2010-10-06 14:27:31 +08:00
|
|
|
unsigned UseIdx, unsigned UseAlign) const {
|
2011-06-29 03:10:37 +08:00
|
|
|
unsigned DefClass = DefMCID.getSchedClass();
|
|
|
|
unsigned UseClass = UseMCID.getSchedClass();
|
2010-10-06 14:27:31 +08:00
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
if (DefIdx < DefMCID.getNumDefs() && UseIdx < UseMCID.getNumOperands())
|
2010-10-06 14:27:31 +08:00
|
|
|
return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
|
|
|
|
|
|
|
|
// This may be a def / use of a variable_ops instruction; the operand
|
|
|
|
// latency might be determinable dynamically. Let the target try to
|
|
|
|
// figure it out.
|
2010-10-28 10:00:25 +08:00
|
|
|
int DefCycle = -1;
|
2010-10-28 14:47:08 +08:00
|
|
|
bool LdmBypass = false;
|
2011-06-29 03:10:37 +08:00
|
|
|
switch (DefMCID.getOpcode()) {
|
2010-10-06 14:27:31 +08:00
|
|
|
default:
|
|
|
|
DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
|
|
|
|
break;
|
2010-11-16 09:16:36 +08:00
|
|
|
|
|
|
|
case ARM::VLDMDIA:
|
|
|
|
case ARM::VLDMDIA_UPD:
|
|
|
|
case ARM::VLDMDDB_UPD:
|
|
|
|
case ARM::VLDMSIA:
|
|
|
|
case ARM::VLDMSIA_UPD:
|
|
|
|
case ARM::VLDMSDB_UPD:
|
2011-06-29 03:10:37 +08:00
|
|
|
DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
|
2010-10-07 09:50:48 +08:00
|
|
|
break;
|
2010-11-16 09:16:36 +08:00
|
|
|
|
|
|
|
case ARM::LDMIA_RET:
|
|
|
|
case ARM::LDMIA:
|
|
|
|
case ARM::LDMDA:
|
|
|
|
case ARM::LDMDB:
|
|
|
|
case ARM::LDMIB:
|
|
|
|
case ARM::LDMIA_UPD:
|
|
|
|
case ARM::LDMDA_UPD:
|
|
|
|
case ARM::LDMDB_UPD:
|
|
|
|
case ARM::LDMIB_UPD:
|
|
|
|
case ARM::tLDMIA:
|
|
|
|
case ARM::tLDMIA_UPD:
|
2010-10-06 14:27:31 +08:00
|
|
|
case ARM::tPUSH:
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::t2LDMIA_RET:
|
|
|
|
case ARM::t2LDMIA:
|
|
|
|
case ARM::t2LDMDB:
|
|
|
|
case ARM::t2LDMIA_UPD:
|
|
|
|
case ARM::t2LDMDB_UPD:
|
2010-10-06 14:27:31 +08:00
|
|
|
LdmBypass = 1;
|
2011-06-29 03:10:37 +08:00
|
|
|
DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
|
2010-10-08 07:12:15 +08:00
|
|
|
break;
|
2010-10-06 14:27:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (DefCycle == -1)
|
|
|
|
// We can't seem to determine the result latency of the def; assume it's 2.
|
|
|
|
DefCycle = 2;
|
|
|
|
|
|
|
|
int UseCycle = -1;
|
2011-06-29 03:10:37 +08:00
|
|
|
switch (UseMCID.getOpcode()) {
|
2010-10-06 14:27:31 +08:00
|
|
|
default:
|
|
|
|
UseCycle = ItinData->getOperandCycle(UseClass, UseIdx);
|
|
|
|
break;
|
2010-11-16 09:16:36 +08:00
|
|
|
|
|
|
|
case ARM::VSTMDIA:
|
|
|
|
case ARM::VSTMDIA_UPD:
|
|
|
|
case ARM::VSTMDDB_UPD:
|
|
|
|
case ARM::VSTMSIA:
|
|
|
|
case ARM::VSTMSIA_UPD:
|
|
|
|
case ARM::VSTMSDB_UPD:
|
2011-06-29 03:10:37 +08:00
|
|
|
UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
|
2010-10-07 09:50:48 +08:00
|
|
|
break;
|
2010-11-16 09:16:36 +08:00
|
|
|
|
|
|
|
case ARM::STMIA:
|
|
|
|
case ARM::STMDA:
|
|
|
|
case ARM::STMDB:
|
|
|
|
case ARM::STMIB:
|
|
|
|
case ARM::STMIA_UPD:
|
|
|
|
case ARM::STMDA_UPD:
|
|
|
|
case ARM::STMDB_UPD:
|
|
|
|
case ARM::STMIB_UPD:
|
|
|
|
case ARM::tSTMIA_UPD:
|
2010-10-06 14:27:31 +08:00
|
|
|
case ARM::tPOP_RET:
|
|
|
|
case ARM::tPOP:
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::t2STMIA:
|
|
|
|
case ARM::t2STMDB:
|
|
|
|
case ARM::t2STMIA_UPD:
|
|
|
|
case ARM::t2STMDB_UPD:
|
2011-06-29 03:10:37 +08:00
|
|
|
UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
|
2010-10-07 09:50:48 +08:00
|
|
|
break;
|
2010-10-06 14:27:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (UseCycle == -1)
|
|
|
|
// Assume it's read in the first stage.
|
|
|
|
UseCycle = 1;
|
|
|
|
|
|
|
|
UseCycle = DefCycle - UseCycle + 1;
|
|
|
|
if (UseCycle > 0) {
|
|
|
|
if (LdmBypass) {
|
|
|
|
// It's a variable_ops instruction so we can't use DefIdx here. Just use
|
|
|
|
// the first def operand.
|
2011-06-29 03:10:37 +08:00
|
|
|
if (ItinData->hasPipelineForwarding(DefClass, DefMCID.getNumOperands()-1,
|
2010-10-06 14:27:31 +08:00
|
|
|
UseClass, UseIdx))
|
|
|
|
--UseCycle;
|
|
|
|
} else if (ItinData->hasPipelineForwarding(DefClass, DefIdx,
|
2010-11-16 09:16:36 +08:00
|
|
|
UseClass, UseIdx)) {
|
2010-10-06 14:27:31 +08:00
|
|
|
--UseCycle;
|
2010-11-16 09:16:36 +08:00
|
|
|
}
|
2010-10-06 14:27:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return UseCycle;
|
|
|
|
}
|
|
|
|
|
2011-12-14 10:11:42 +08:00
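// Scan backwards from the last instruction of the bundle headed by MI to
// find the bundled instruction defining Reg; returns it and sets DefIdx to
// the operand index and Dist to the number of instructions scanned past.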
|
|
|
static const MachineInstr *getBundledDefMI(const TargetRegisterInfo *TRI,
|
2011-12-15 04:00:08 +08:00
|
|
|
const MachineInstr *MI, unsigned Reg,
|
2011-12-14 10:11:42 +08:00
|
|
|
unsigned &DefIdx, unsigned &Dist) {
|
|
|
|
Dist = 0;
|
|
|
|
|
|
|
|
MachineBasicBlock::const_iterator I = MI; ++I;
|
2014-03-02 20:27:27 +08:00
|
|
|
MachineBasicBlock::const_instr_iterator II = std::prev(I.getInstrIterator());
|
2011-12-14 10:11:42 +08:00
|
|
|
assert(II->isInsideBundle() && "Empty bundle?");
|
|
|
|
|
|
|
|
int Idx = -1;
|
|
|
|
while (II->isInsideBundle()) {
|
|
|
|
Idx = II->findRegisterDefOperandIdx(Reg, false, true, TRI);
|
|
|
|
if (Idx != -1)
|
|
|
|
break;
|
|
|
|
--II;
|
|
|
|
++Dist;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(Idx != -1 && "Cannot find bundled definition!");
|
|
|
|
DefIdx = Idx;
|
|
|
|
return II;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const MachineInstr *getBundledUseMI(const TargetRegisterInfo *TRI,
|
2011-12-15 04:00:08 +08:00
|
|
|
const MachineInstr *MI, unsigned Reg,
|
2011-12-14 10:11:42 +08:00
|
|
|
unsigned &UseIdx, unsigned &Dist) {
|
|
|
|
Dist = 0;
|
|
|
|
|
|
|
|
MachineBasicBlock::const_instr_iterator II = MI; ++II;
|
|
|
|
assert(II->isInsideBundle() && "Empty bundle?");
|
|
|
|
MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
|
|
|
|
|
|
|
|
// FIXME: This doesn't properly handle multiple uses.
|
|
|
|
int Idx = -1;
|
|
|
|
while (II != E && II->isInsideBundle()) {
|
|
|
|
Idx = II->findRegisterUseOperandIdx(Reg, false, TRI);
|
|
|
|
if (Idx != -1)
|
|
|
|
break;
|
|
|
|
if (II->getOpcode() != ARM::t2IT)
|
|
|
|
++Dist;
|
|
|
|
++II;
|
|
|
|
}
|
|
|
|
|
2011-12-15 04:00:08 +08:00
|
|
|
if (Idx == -1) {
|
|
|
|
Dist = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2011-12-14 10:11:42 +08:00
|
|
|
UseIdx = Idx;
|
|
|
|
return II;
|
|
|
|
}
|
|
|
|
|
2012-06-08 03:42:00 +08:00
|
|
|
/// Return the number of cycles to add to (or subtract from) the static
|
|
|
|
/// itinerary based on the def opcode and alignment. The caller will ensure that
|
|
|
|
/// adjusted latency is at least one cycle.
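// Illustrative example: on A8/A9-like cores an "ldr r0, [r1, r2]" (LDRrs
// with no shift) is modeled one cycle cheaper than the shifted variants,
// so the adjustment computed below is -1 for that form.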
|
|
|
|
static int adjustDefLatency(const ARMSubtarget &Subtarget,
|
|
|
|
const MachineInstr *DefMI,
|
|
|
|
const MCInstrDesc *DefMCID, unsigned DefAlign) {
|
|
|
|
int Adjust = 0;
|
2012-09-13 23:05:10 +08:00
|
|
|
if (Subtarget.isCortexA8() || Subtarget.isLikeA9()) {
|
2010-10-28 14:47:08 +08:00
|
|
|
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
|
|
|
|
// variants are one cycle cheaper.
|
2011-12-14 10:11:42 +08:00
|
|
|
switch (DefMCID->getOpcode()) {
|
2010-10-28 14:47:08 +08:00
|
|
|
default: break;
|
2012-08-28 11:11:27 +08:00
|
|
|
case ARM::LDRrs:
|
|
|
|
case ARM::LDRBrs: {
|
2010-10-28 14:47:08 +08:00
|
|
|
unsigned ShOpVal = DefMI->getOperand(3).getImm();
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (ShImm == 0 ||
|
|
|
|
(ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
|
2012-06-08 03:42:00 +08:00
|
|
|
--Adjust;
|
2010-10-28 14:47:08 +08:00
|
|
|
break;
|
|
|
|
}
|
2012-08-28 11:11:27 +08:00
|
|
|
case ARM::t2LDRs:
|
|
|
|
case ARM::t2LDRBs:
|
|
|
|
case ARM::t2LDRHs:
|
2010-10-28 14:47:08 +08:00
|
|
|
case ARM::t2LDRSHs: {
|
|
|
|
// Thumb2 mode: lsl only.
|
|
|
|
unsigned ShAmt = DefMI->getOperand(3).getImm();
|
|
|
|
if (ShAmt == 0 || ShAmt == 2)
|
2012-06-08 03:42:00 +08:00
|
|
|
--Adjust;
|
2010-10-28 14:47:08 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2012-09-30 05:43:49 +08:00
|
|
|
} else if (Subtarget.isSwift()) {
|
|
|
|
// FIXME: Properly handle all of the latency adjustments for address
|
|
|
|
// writeback.
|
|
|
|
switch (DefMCID->getOpcode()) {
|
|
|
|
default: break;
|
|
|
|
case ARM::LDRrs:
|
|
|
|
case ARM::LDRBrs: {
|
|
|
|
unsigned ShOpVal = DefMI->getOperand(3).getImm();
|
|
|
|
bool isSub = ARM_AM::getAM2Op(ShOpVal) == ARM_AM::sub;
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (!isSub &&
|
|
|
|
(ShImm == 0 ||
|
|
|
|
((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
|
|
|
|
ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl)))
|
|
|
|
Adjust -= 2;
|
|
|
|
else if (!isSub &&
|
|
|
|
ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
|
|
|
|
--Adjust;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case ARM::t2LDRs:
|
|
|
|
case ARM::t2LDRBs:
|
|
|
|
case ARM::t2LDRHs:
|
|
|
|
case ARM::t2LDRSHs: {
|
|
|
|
// Thumb2 mode: lsl only.
|
|
|
|
unsigned ShAmt = DefMI->getOperand(3).getImm();
|
|
|
|
if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
|
|
|
|
Adjust -= 2;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2010-10-28 14:47:08 +08:00
|
|
|
}
|
|
|
|
|
2012-09-13 23:05:10 +08:00
|
|
|
if (DefAlign < 8 && Subtarget.isLikeA9()) {
|
2011-12-14 10:11:42 +08:00
|
|
|
switch (DefMCID->getOpcode()) {
|
2011-04-19 09:21:49 +08:00
|
|
|
default: break;
|
|
|
|
case ARM::VLD1q8:
|
|
|
|
case ARM::VLD1q16:
|
|
|
|
case ARM::VLD1q32:
|
|
|
|
case ARM::VLD1q64:
|
2011-10-25 05:45:13 +08:00
|
|
|
case ARM::VLD1q8wb_fixed:
|
|
|
|
case ARM::VLD1q16wb_fixed:
|
|
|
|
case ARM::VLD1q32wb_fixed:
|
|
|
|
case ARM::VLD1q64wb_fixed:
|
|
|
|
case ARM::VLD1q8wb_register:
|
|
|
|
case ARM::VLD1q16wb_register:
|
|
|
|
case ARM::VLD1q32wb_register:
|
|
|
|
case ARM::VLD1q64wb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD2d8:
|
|
|
|
case ARM::VLD2d16:
|
|
|
|
case ARM::VLD2d32:
|
|
|
|
case ARM::VLD2q8:
|
|
|
|
case ARM::VLD2q16:
|
|
|
|
case ARM::VLD2q32:
|
2011-12-10 05:28:25 +08:00
|
|
|
case ARM::VLD2d8wb_fixed:
|
|
|
|
case ARM::VLD2d16wb_fixed:
|
|
|
|
case ARM::VLD2d32wb_fixed:
|
|
|
|
case ARM::VLD2q8wb_fixed:
|
|
|
|
case ARM::VLD2q16wb_fixed:
|
|
|
|
case ARM::VLD2q32wb_fixed:
|
|
|
|
case ARM::VLD2d8wb_register:
|
|
|
|
case ARM::VLD2d16wb_register:
|
|
|
|
case ARM::VLD2d32wb_register:
|
|
|
|
case ARM::VLD2q8wb_register:
|
|
|
|
case ARM::VLD2q16wb_register:
|
|
|
|
case ARM::VLD2q32wb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD3d8:
|
|
|
|
case ARM::VLD3d16:
|
|
|
|
case ARM::VLD3d32:
|
|
|
|
case ARM::VLD1d64T:
|
|
|
|
case ARM::VLD3d8_UPD:
|
|
|
|
case ARM::VLD3d16_UPD:
|
|
|
|
case ARM::VLD3d32_UPD:
|
2011-10-25 07:26:05 +08:00
|
|
|
case ARM::VLD1d64Twb_fixed:
|
|
|
|
case ARM::VLD1d64Twb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD3q8_UPD:
|
|
|
|
case ARM::VLD3q16_UPD:
|
|
|
|
case ARM::VLD3q32_UPD:
|
|
|
|
case ARM::VLD4d8:
|
|
|
|
case ARM::VLD4d16:
|
|
|
|
case ARM::VLD4d32:
|
|
|
|
case ARM::VLD1d64Q:
|
|
|
|
case ARM::VLD4d8_UPD:
|
|
|
|
case ARM::VLD4d16_UPD:
|
|
|
|
case ARM::VLD4d32_UPD:
|
2011-10-25 08:14:01 +08:00
|
|
|
case ARM::VLD1d64Qwb_fixed:
|
|
|
|
case ARM::VLD1d64Qwb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD4q8_UPD:
|
|
|
|
case ARM::VLD4q16_UPD:
|
|
|
|
case ARM::VLD4q32_UPD:
|
|
|
|
case ARM::VLD1DUPq8:
|
|
|
|
case ARM::VLD1DUPq16:
|
|
|
|
case ARM::VLD1DUPq32:
|
2011-12-01 03:35:44 +08:00
|
|
|
case ARM::VLD1DUPq8wb_fixed:
|
|
|
|
case ARM::VLD1DUPq16wb_fixed:
|
|
|
|
case ARM::VLD1DUPq32wb_fixed:
|
|
|
|
case ARM::VLD1DUPq8wb_register:
|
|
|
|
case ARM::VLD1DUPq16wb_register:
|
|
|
|
case ARM::VLD1DUPq32wb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD2DUPd8:
|
|
|
|
case ARM::VLD2DUPd16:
|
|
|
|
case ARM::VLD2DUPd32:
|
2011-12-22 03:40:55 +08:00
|
|
|
case ARM::VLD2DUPd8wb_fixed:
|
|
|
|
case ARM::VLD2DUPd16wb_fixed:
|
|
|
|
case ARM::VLD2DUPd32wb_fixed:
|
|
|
|
case ARM::VLD2DUPd8wb_register:
|
|
|
|
case ARM::VLD2DUPd16wb_register:
|
|
|
|
case ARM::VLD2DUPd32wb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD4DUPd8:
|
|
|
|
case ARM::VLD4DUPd16:
|
|
|
|
case ARM::VLD4DUPd32:
|
|
|
|
case ARM::VLD4DUPd8_UPD:
|
|
|
|
case ARM::VLD4DUPd16_UPD:
|
|
|
|
case ARM::VLD4DUPd32_UPD:
|
|
|
|
case ARM::VLD1LNd8:
|
|
|
|
case ARM::VLD1LNd16:
|
|
|
|
case ARM::VLD1LNd32:
|
|
|
|
case ARM::VLD1LNd8_UPD:
|
|
|
|
case ARM::VLD1LNd16_UPD:
|
|
|
|
case ARM::VLD1LNd32_UPD:
|
|
|
|
case ARM::VLD2LNd8:
|
|
|
|
case ARM::VLD2LNd16:
|
|
|
|
case ARM::VLD2LNd32:
|
|
|
|
case ARM::VLD2LNq16:
|
|
|
|
case ARM::VLD2LNq32:
|
|
|
|
case ARM::VLD2LNd8_UPD:
|
|
|
|
case ARM::VLD2LNd16_UPD:
|
|
|
|
case ARM::VLD2LNd32_UPD:
|
|
|
|
case ARM::VLD2LNq16_UPD:
|
|
|
|
case ARM::VLD2LNq32_UPD:
|
|
|
|
case ARM::VLD4LNd8:
|
|
|
|
case ARM::VLD4LNd16:
|
|
|
|
case ARM::VLD4LNd32:
|
|
|
|
case ARM::VLD4LNq16:
|
|
|
|
case ARM::VLD4LNq32:
|
|
|
|
case ARM::VLD4LNd8_UPD:
|
|
|
|
case ARM::VLD4LNd16_UPD:
|
|
|
|
case ARM::VLD4LNd32_UPD:
|
|
|
|
case ARM::VLD4LNq16_UPD:
|
|
|
|
case ARM::VLD4LNq32_UPD:
|
|
|
|
// If the address is not 64-bit aligned, the latencies of these
|
|
|
|
// instructions increase by one.
|
2012-06-08 03:42:00 +08:00
|
|
|
++Adjust;
|
2011-04-19 09:21:49 +08:00
|
|
|
break;
|
|
|
|
}
|
2012-06-08 03:42:00 +08:00
|
|
|
}
|
|
|
|
return Adjust;
|
|
|
|
}
|
|
|
|
|
2011-04-19 09:21:49 +08:00
|
|
|
|
2012-06-08 03:42:00 +08:00
|
|
|
|
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr *DefMI, unsigned DefIdx,
|
|
|
|
const MachineInstr *UseMI,
|
|
|
|
unsigned UseIdx) const {
|
|
|
|
// No operand latency. The caller may fall back to getInstrLatency.
|
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
const MachineOperand &DefMO = DefMI->getOperand(DefIdx);
|
|
|
|
unsigned Reg = DefMO.getReg();
|
|
|
|
const MCInstrDesc *DefMCID = &DefMI->getDesc();
|
|
|
|
const MCInstrDesc *UseMCID = &UseMI->getDesc();
|
|
|
|
|
|
|
|
unsigned DefAdj = 0;
|
|
|
|
if (DefMI->isBundle()) {
|
|
|
|
DefMI = getBundledDefMI(&getRegisterInfo(), DefMI, Reg, DefIdx, DefAdj);
|
|
|
|
DefMCID = &DefMI->getDesc();
|
|
|
|
}
|
|
|
|
if (DefMI->isCopyLike() || DefMI->isInsertSubreg() ||
|
|
|
|
DefMI->isRegSequence() || DefMI->isImplicitDef()) {
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned UseAdj = 0;
|
|
|
|
if (UseMI->isBundle()) {
|
|
|
|
unsigned NewUseIdx;
|
|
|
|
const MachineInstr *NewUseMI = getBundledUseMI(&getRegisterInfo(), UseMI,
|
|
|
|
Reg, NewUseIdx, UseAdj);
|
2012-06-22 10:50:33 +08:00
|
|
|
if (!NewUseMI)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
UseMI = NewUseMI;
|
|
|
|
UseIdx = NewUseIdx;
|
|
|
|
UseMCID = &UseMI->getDesc();
|
2012-06-08 03:42:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (Reg == ARM::CPSR) {
|
|
|
|
if (DefMI->getOpcode() == ARM::FMSTAT) {
|
|
|
|
// fpscr -> cpsr stalls over 20 cycles on A8 (and earlier?)
|
2012-09-13 23:05:10 +08:00
|
|
|
return Subtarget.isLikeA9() ? 1 : 20;
|
2012-06-08 03:42:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// CPSR set and branch can be paired in the same cycle.
|
|
|
|
if (UseMI->isBranch())
|
|
|
|
return 0;
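// (E.g. a flag-setting "subs" immediately followed by "bne" is modeled as
// costing no extra latency.)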
|
|
|
|
|
|
|
|
// Otherwise it takes the instruction latency (generally one).
|
|
|
|
unsigned Latency = getInstrLatency(ItinData, DefMI);
|
|
|
|
|
|
|
|
// For Thumb2 and -Os, prefer scheduling CPSR setting instruction close to
|
|
|
|
// its uses. Instructions which are otherwise scheduled between them may
|
|
|
|
// incur a code size penalty (not able to use the CPSR setting 16-bit
|
|
|
|
// instructions).
|
|
|
|
if (Latency > 0 && Subtarget.isThumb2()) {
|
|
|
|
const MachineFunction *MF = DefMI->getParent()->getParent();
|
2012-12-30 18:32:01 +08:00
|
|
|
if (MF->getFunction()->getAttributes().
|
|
|
|
hasAttribute(AttributeSet::FunctionIndex,
|
|
|
|
Attribute::OptimizeForSize))
|
2012-06-08 03:42:00 +08:00
|
|
|
--Latency;
|
|
|
|
}
|
|
|
|
return Latency;
|
|
|
|
}
|
|
|
|
|
2012-06-22 10:50:33 +08:00
|
|
|
if (DefMO.isImplicit() || UseMI->getOperand(UseIdx).isImplicit())
|
|
|
|
return -1;
|
|
|
|
|
2012-06-08 03:42:00 +08:00
|
|
|
unsigned DefAlign = DefMI->hasOneMemOperand()
|
|
|
|
? (*DefMI->memoperands_begin())->getAlignment() : 0;
|
|
|
|
unsigned UseAlign = UseMI->hasOneMemOperand()
|
|
|
|
? (*UseMI->memoperands_begin())->getAlignment() : 0;
|
|
|
|
|
|
|
|
// Get the itinerary's latency if possible, and handle variable_ops.
|
|
|
|
int Latency = getOperandLatency(ItinData, *DefMCID, DefIdx, DefAlign,
|
|
|
|
*UseMCID, UseIdx, UseAlign);
|
|
|
|
// Unable to find operand latency. The caller may resort to getInstrLatency.
|
|
|
|
if (Latency < 0)
|
|
|
|
return Latency;
|
|
|
|
|
|
|
|
// Adjust for IT block position.
|
|
|
|
int Adj = DefAdj + UseAdj;
|
|
|
|
|
|
|
|
// Adjust for dynamic def-side opcode variants not captured by the itinerary.
|
|
|
|
Adj += adjustDefLatency(Subtarget, DefMI, DefMCID, DefAlign);
|
|
|
|
if (Adj >= 0 || (int)Latency > -Adj) {
|
|
|
|
return Latency + Adj;
|
|
|
|
}
|
|
|
|
// Return the itinerary latency, which may be zero but not less than zero.
|
2010-10-28 14:47:08 +08:00
|
|
|
return Latency;
|
2010-10-06 14:27:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
|
|
|
|
SDNode *DefNode, unsigned DefIdx,
|
|
|
|
SDNode *UseNode, unsigned UseIdx) const {
|
|
|
|
if (!DefNode->isMachineOpcode())
|
|
|
|
return 1;
|
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &DefMCID = get(DefNode->getMachineOpcode());
|
2011-01-21 13:51:33 +08:00
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
if (isZeroCost(DefMCID.Opcode))
|
2011-01-21 13:51:33 +08:00
|
|
|
return 0;
|
|
|
|
|
2010-10-06 14:27:31 +08:00
|
|
|
if (!ItinData || ItinData->isEmpty())
|
2011-06-29 03:10:37 +08:00
|
|
|
return DefMCID.mayLoad() ? 3 : 1;
|
2010-10-06 14:27:31 +08:00
|
|
|
|
// If the use is not a machine opcode (e.g. a copy), avoid overly
// aggressive latency scheduling: when two nodes share an operand and one
// of them has a single use that is a live-out copy, favor the one that is
// live out; otherwise it is difficult to eliminate the copy if the
// instruction is a loop induction variable update, e.g.:
//   BB:
//     sub r1, r3, #1
//     str r0, [r2, r3]
//     mov r3, r1
//     cmp
//     bne BB
// =>
//   BB:
//     str r0, [r2, r3]
//     sub r3, r3, #1
//     cmp
//     bne BB
2010-10-30 02:09:28 +08:00
|
|
|
if (!UseNode->isMachineOpcode()) {
|
2011-06-29 03:10:37 +08:00
|
|
|
int Latency = ItinData->getOperandCycle(DefMCID.getSchedClass(), DefIdx);
|
2012-09-30 05:43:49 +08:00
|
|
|
if (Subtarget.isLikeA9() || Subtarget.isSwift())
|
2010-10-30 02:09:28 +08:00
|
|
|
return Latency <= 2 ? 1 : Latency - 1;
|
|
|
|
else
|
|
|
|
return Latency <= 3 ? 1 : Latency - 2;
|
|
|
|
}
|
2010-10-06 14:27:31 +08:00
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
|
2010-10-06 14:27:31 +08:00
|
|
|
const MachineSDNode *DefMN = dyn_cast<MachineSDNode>(DefNode);
|
|
|
|
unsigned DefAlign = !DefMN->memoperands_empty()
|
|
|
|
? (*DefMN->memoperands_begin())->getAlignment() : 0;
|
|
|
|
const MachineSDNode *UseMN = dyn_cast<MachineSDNode>(UseNode);
|
|
|
|
unsigned UseAlign = !UseMN->memoperands_empty()
|
|
|
|
? (*UseMN->memoperands_begin())->getAlignment() : 0;
|
2011-06-29 03:10:37 +08:00
|
|
|
int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
|
|
|
|
UseMCID, UseIdx, UseAlign);
|
2010-10-28 14:47:08 +08:00
|
|
|
|
|
|
|
if (Latency > 1 &&
|
2012-09-13 23:05:10 +08:00
|
|
|
(Subtarget.isCortexA8() || Subtarget.isLikeA9())) {
|
2010-10-28 14:47:08 +08:00
|
|
|
// FIXME: Shifter op hack: no shift (i.e. [r +/- r]) or [r + r << 2]
|
|
|
|
// variants are one cycle cheaper.
|
2011-06-29 03:10:37 +08:00
|
|
|
switch (DefMCID.getOpcode()) {
|
2010-10-28 14:47:08 +08:00
|
|
|
default: break;
|
2012-08-28 11:11:27 +08:00
|
|
|
case ARM::LDRrs:
|
|
|
|
case ARM::LDRBrs: {
|
2010-10-28 14:47:08 +08:00
|
|
|
unsigned ShOpVal =
|
|
|
|
cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (ShImm == 0 ||
|
|
|
|
(ShImm == 2 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
|
|
|
|
--Latency;
|
|
|
|
break;
|
|
|
|
}
|
2012-08-28 11:11:27 +08:00
|
|
|
case ARM::t2LDRs:
|
|
|
|
case ARM::t2LDRBs:
|
|
|
|
case ARM::t2LDRHs:
|
2010-10-28 14:47:08 +08:00
|
|
|
case ARM::t2LDRSHs: {
|
|
|
|
// Thumb2 mode: lsl only.
|
|
|
|
unsigned ShAmt =
|
|
|
|
cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
|
|
|
|
if (ShAmt == 0 || ShAmt == 2)
|
|
|
|
--Latency;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2012-09-30 05:43:49 +08:00
|
|
|
} else if (DefIdx == 0 && Latency > 2 && Subtarget.isSwift()) {
|
|
|
|
// FIXME: Properly handle all of the latency adjustments for address
|
|
|
|
// writeback.
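// In short: on Swift, a cheap shift (none, or lsl #1-#3) earns a two-cycle
// discount, lsr #1 earns one cycle back, and other shifted forms keep the
// full latency; see the cases below.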
|
|
|
|
switch (DefMCID.getOpcode()) {
|
|
|
|
default: break;
|
|
|
|
case ARM::LDRrs:
|
|
|
|
case ARM::LDRBrs: {
|
|
|
|
unsigned ShOpVal =
|
|
|
|
cast<ConstantSDNode>(DefNode->getOperand(2))->getZExtValue();
|
|
|
|
unsigned ShImm = ARM_AM::getAM2Offset(ShOpVal);
|
|
|
|
if (ShImm == 0 ||
|
|
|
|
((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
|
|
|
|
ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsl))
|
|
|
|
Latency -= 2;
|
|
|
|
else if (ShImm == 1 && ARM_AM::getAM2ShiftOpc(ShOpVal) == ARM_AM::lsr)
|
|
|
|
--Latency;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
case ARM::t2LDRs:
|
|
|
|
case ARM::t2LDRBs:
|
|
|
|
case ARM::t2LDRHs:
|
|
|
|
case ARM::t2LDRSHs: {
|
|
|
|
// Thumb2 mode: lsl 0-3 only.
|
|
|
|
Latency -= 2;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2010-10-28 14:47:08 +08:00
|
|
|
}
|
|
|
|
|
2012-09-13 23:05:10 +08:00
|
|
|
if (DefAlign < 8 && Subtarget.isLikeA9())
|
2011-06-29 03:10:37 +08:00
|
|
|
switch (DefMCID.getOpcode()) {
|
2011-04-19 09:21:49 +08:00
|
|
|
default: break;
|
2012-03-06 03:33:30 +08:00
|
|
|
case ARM::VLD1q8:
|
|
|
|
case ARM::VLD1q16:
|
|
|
|
case ARM::VLD1q32:
|
|
|
|
case ARM::VLD1q64:
|
|
|
|
case ARM::VLD1q8wb_register:
|
|
|
|
case ARM::VLD1q16wb_register:
|
|
|
|
case ARM::VLD1q32wb_register:
|
|
|
|
case ARM::VLD1q64wb_register:
|
|
|
|
case ARM::VLD1q8wb_fixed:
|
|
|
|
case ARM::VLD1q16wb_fixed:
|
|
|
|
case ARM::VLD1q32wb_fixed:
|
|
|
|
case ARM::VLD1q64wb_fixed:
|
|
|
|
case ARM::VLD2d8:
|
|
|
|
case ARM::VLD2d16:
|
|
|
|
case ARM::VLD2d32:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD2q8Pseudo:
|
|
|
|
case ARM::VLD2q16Pseudo:
|
|
|
|
case ARM::VLD2q32Pseudo:
|
2012-03-06 03:33:30 +08:00
|
|
|
case ARM::VLD2d8wb_fixed:
|
|
|
|
case ARM::VLD2d16wb_fixed:
|
|
|
|
case ARM::VLD2d32wb_fixed:
|
2011-12-10 05:28:25 +08:00
|
|
|
case ARM::VLD2q8PseudoWB_fixed:
|
|
|
|
case ARM::VLD2q16PseudoWB_fixed:
|
|
|
|
case ARM::VLD2q32PseudoWB_fixed:
|
2012-03-06 03:33:30 +08:00
|
|
|
case ARM::VLD2d8wb_register:
|
|
|
|
case ARM::VLD2d16wb_register:
|
|
|
|
case ARM::VLD2d32wb_register:
|
2011-12-10 05:28:25 +08:00
|
|
|
case ARM::VLD2q8PseudoWB_register:
|
|
|
|
case ARM::VLD2q16PseudoWB_register:
|
|
|
|
case ARM::VLD2q32PseudoWB_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD3d8Pseudo:
|
|
|
|
case ARM::VLD3d16Pseudo:
|
|
|
|
case ARM::VLD3d32Pseudo:
|
|
|
|
case ARM::VLD1d64TPseudo:
|
2014-01-16 17:16:13 +08:00
|
|
|
case ARM::VLD1d64TPseudoWB_fixed:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD3d8Pseudo_UPD:
|
|
|
|
case ARM::VLD3d16Pseudo_UPD:
|
|
|
|
case ARM::VLD3d32Pseudo_UPD:
|
|
|
|
case ARM::VLD3q8Pseudo_UPD:
|
|
|
|
case ARM::VLD3q16Pseudo_UPD:
|
|
|
|
case ARM::VLD3q32Pseudo_UPD:
|
|
|
|
case ARM::VLD3q8oddPseudo:
|
|
|
|
case ARM::VLD3q16oddPseudo:
|
|
|
|
case ARM::VLD3q32oddPseudo:
|
|
|
|
case ARM::VLD3q8oddPseudo_UPD:
|
|
|
|
case ARM::VLD3q16oddPseudo_UPD:
|
|
|
|
case ARM::VLD3q32oddPseudo_UPD:
|
|
|
|
case ARM::VLD4d8Pseudo:
|
|
|
|
case ARM::VLD4d16Pseudo:
|
|
|
|
case ARM::VLD4d32Pseudo:
|
|
|
|
case ARM::VLD1d64QPseudo:
|
2014-01-16 17:16:13 +08:00
|
|
|
case ARM::VLD1d64QPseudoWB_fixed:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD4d8Pseudo_UPD:
|
|
|
|
case ARM::VLD4d16Pseudo_UPD:
|
|
|
|
case ARM::VLD4d32Pseudo_UPD:
|
|
|
|
case ARM::VLD4q8Pseudo_UPD:
|
|
|
|
case ARM::VLD4q16Pseudo_UPD:
|
|
|
|
case ARM::VLD4q32Pseudo_UPD:
|
|
|
|
case ARM::VLD4q8oddPseudo:
|
|
|
|
case ARM::VLD4q16oddPseudo:
|
|
|
|
case ARM::VLD4q32oddPseudo:
|
|
|
|
case ARM::VLD4q8oddPseudo_UPD:
|
|
|
|
case ARM::VLD4q16oddPseudo_UPD:
|
|
|
|
case ARM::VLD4q32oddPseudo_UPD:
|
2012-03-07 06:01:44 +08:00
|
|
|
case ARM::VLD1DUPq8:
|
|
|
|
case ARM::VLD1DUPq16:
|
|
|
|
case ARM::VLD1DUPq32:
|
|
|
|
case ARM::VLD1DUPq8wb_fixed:
|
|
|
|
case ARM::VLD1DUPq16wb_fixed:
|
|
|
|
case ARM::VLD1DUPq32wb_fixed:
|
|
|
|
case ARM::VLD1DUPq8wb_register:
|
|
|
|
case ARM::VLD1DUPq16wb_register:
|
|
|
|
case ARM::VLD1DUPq32wb_register:
|
|
|
|
case ARM::VLD2DUPd8:
|
|
|
|
case ARM::VLD2DUPd16:
|
|
|
|
case ARM::VLD2DUPd32:
|
|
|
|
case ARM::VLD2DUPd8wb_fixed:
|
|
|
|
case ARM::VLD2DUPd16wb_fixed:
|
|
|
|
case ARM::VLD2DUPd32wb_fixed:
|
|
|
|
case ARM::VLD2DUPd8wb_register:
|
|
|
|
case ARM::VLD2DUPd16wb_register:
|
|
|
|
case ARM::VLD2DUPd32wb_register:
|
2011-04-19 09:21:49 +08:00
|
|
|
case ARM::VLD4DUPd8Pseudo:
|
|
|
|
case ARM::VLD4DUPd16Pseudo:
|
|
|
|
case ARM::VLD4DUPd32Pseudo:
|
|
|
|
case ARM::VLD4DUPd8Pseudo_UPD:
|
|
|
|
case ARM::VLD4DUPd16Pseudo_UPD:
|
|
|
|
case ARM::VLD4DUPd32Pseudo_UPD:
|
|
|
|
case ARM::VLD1LNq8Pseudo:
|
|
|
|
case ARM::VLD1LNq16Pseudo:
|
|
|
|
case ARM::VLD1LNq32Pseudo:
|
|
|
|
case ARM::VLD1LNq8Pseudo_UPD:
|
|
|
|
case ARM::VLD1LNq16Pseudo_UPD:
|
|
|
|
case ARM::VLD1LNq32Pseudo_UPD:
|
|
|
|
case ARM::VLD2LNd8Pseudo:
|
|
|
|
case ARM::VLD2LNd16Pseudo:
|
|
|
|
case ARM::VLD2LNd32Pseudo:
|
|
|
|
case ARM::VLD2LNq16Pseudo:
|
|
|
|
case ARM::VLD2LNq32Pseudo:
|
|
|
|
case ARM::VLD2LNd8Pseudo_UPD:
|
|
|
|
case ARM::VLD2LNd16Pseudo_UPD:
|
|
|
|
case ARM::VLD2LNd32Pseudo_UPD:
|
|
|
|
case ARM::VLD2LNq16Pseudo_UPD:
|
|
|
|
case ARM::VLD2LNq32Pseudo_UPD:
|
|
|
|
case ARM::VLD4LNd8Pseudo:
|
|
|
|
case ARM::VLD4LNd16Pseudo:
|
|
|
|
case ARM::VLD4LNd32Pseudo:
|
|
|
|
case ARM::VLD4LNq16Pseudo:
|
|
|
|
case ARM::VLD4LNq32Pseudo:
|
|
|
|
case ARM::VLD4LNd8Pseudo_UPD:
|
|
|
|
case ARM::VLD4LNd16Pseudo_UPD:
|
|
|
|
case ARM::VLD4LNd32Pseudo_UPD:
|
|
|
|
case ARM::VLD4LNq16Pseudo_UPD:
|
|
|
|
case ARM::VLD4LNq32Pseudo_UPD:
|
|
|
|
// If the address is not 64-bit aligned, the latencies of these
|
|
|
|
// instructions increase by one.
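// (e.g. a VLD1q32 whose memory operand only guarantees 4-byte alignment
// pays the extra cycle; the DefAlign < 8 guard above models this.)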
|
|
|
|
++Latency;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2010-10-28 14:47:08 +08:00
|
|
|
return Latency;
|
2010-10-06 14:27:31 +08:00
|
|
|
}
|
2010-10-20 02:58:51 +08:00
|
|
|
|
2013-09-30 23:28:56 +08:00
|
|
|
unsigned ARMBaseInstrInfo::getPredicationCost(const MachineInstr *MI) const {
|
|
|
|
if (MI->isCopyLike() || MI->isInsertSubreg() ||
|
|
|
|
MI->isRegSequence() || MI->isImplicitDef())
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (MI->isBundle())
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
const MCInstrDesc &MCID = MI->getDesc();
|
|
|
|
|
|
|
|
if (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR)) {
|
|
|
|
// When predicated, CPSR is an additional source operand for CPSR updating
|
|
|
|
// instructions; this apparently increases their latencies.
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-06-06 05:11:27 +08:00
|
|
|
unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr *MI,
|
|
|
|
unsigned *PredCost) const {
|
2010-11-03 08:45:17 +08:00
|
|
|
if (MI->isCopyLike() || MI->isInsertSubreg() ||
|
|
|
|
MI->isRegSequence() || MI->isImplicitDef())
|
|
|
|
return 1;
|
|
|
|
|
2012-06-08 03:41:55 +08:00
|
|
|
// An instruction scheduler typically runs on unbundled instructions; however,
|
|
|
|
// other passes may query the latency of a bundled instruction.
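// For an IT bundle, for example, the t2IT instruction itself is treated as
// free below; the bundle's latency is the sum of its other instructions.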
|
2011-12-14 10:11:42 +08:00
|
|
|
if (MI->isBundle()) {
|
2012-06-08 03:41:55 +08:00
|
|
|
unsigned Latency = 0;
|
2011-12-14 10:11:42 +08:00
|
|
|
MachineBasicBlock::const_instr_iterator I = MI;
|
|
|
|
MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
|
|
|
|
while (++I != E && I->isInsideBundle()) {
|
|
|
|
if (I->getOpcode() != ARM::t2IT)
|
|
|
|
Latency += getInstrLatency(ItinData, I, PredCost);
|
|
|
|
}
|
|
|
|
return Latency;
|
|
|
|
}
|
|
|
|
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &MCID = MI->getDesc();
|
2012-06-08 03:41:55 +08:00
|
|
|
if (PredCost && (MCID.isCall() || MCID.hasImplicitDefOfPhysReg(ARM::CPSR))) {
|
2010-11-03 08:45:17 +08:00
|
|
|
// When predicated, CPSR is an additional source operand for CPSR updating
|
|
|
|
// instructions; this apparently increases their latencies.
|
|
|
|
*PredCost = 1;
|
2012-06-08 03:41:55 +08:00
|
|
|
}
|
|
|
|
// Be sure to call getStageLatency for an empty itinerary in case it has a
|
|
|
|
// valid MinLatency property.
|
|
|
|
if (!ItinData)
|
|
|
|
return MI->mayLoad() ? 3 : 1;
|
|
|
|
|
|
|
|
unsigned Class = MCID.getSchedClass();
|
|
|
|
|
|
|
|
// For instructions with variable uops, use uops as latency.
|
2012-07-03 03:12:29 +08:00
|
|
|
if (!ItinData->isEmpty() && ItinData->getNumMicroOps(Class) < 0)
|
2012-06-08 03:41:55 +08:00
|
|
|
return getNumMicroOps(ItinData, MI);
|
2012-07-03 03:12:29 +08:00
|
|
|
|
2012-06-08 03:41:55 +08:00
|
|
|
// For the common case, fall back on the itinerary's latency.
|
2012-06-08 03:42:00 +08:00
|
|
|
unsigned Latency = ItinData->getStageLatency(Class);
|
|
|
|
|
|
|
|
// Adjust for dynamic def-side opcode variants not captured by the itinerary.
|
|
|
|
unsigned DefAlign = MI->hasOneMemOperand()
|
|
|
|
? (*MI->memoperands_begin())->getAlignment() : 0;
|
|
|
|
int Adj = adjustDefLatency(Subtarget, MI, &MCID, DefAlign);
|
|
|
|
if (Adj >= 0 || (int)Latency > -Adj) {
|
|
|
|
return Latency + Adj;
|
|
|
|
}
|
|
|
|
return Latency;
|
2010-11-03 08:45:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
|
|
|
|
SDNode *Node) const {
|
|
|
|
if (!Node->isMachineOpcode())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
unsigned Opcode = Node->getMachineOpcode();
|
|
|
|
switch (Opcode) {
|
|
|
|
default:
|
|
|
|
return ItinData->getStageLatency(get(Opcode).getSchedClass());
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::VLDMQIA:
|
|
|
|
case ARM::VSTMQIA:
|
2010-11-03 08:45:17 +08:00
|
|
|
return 2;
|
2010-11-19 03:40:05 +08:00
|
|
|
}
|
2010-11-03 08:45:17 +08:00
|
|
|
}
|
|
|
|
|
2010-10-20 02:58:51 +08:00
|
|
|
bool ARMBaseInstrInfo::
|
|
|
|
hasHighOperandLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineRegisterInfo *MRI,
|
|
|
|
const MachineInstr *DefMI, unsigned DefIdx,
|
|
|
|
const MachineInstr *UseMI, unsigned UseIdx) const {
|
|
|
|
unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
|
|
|
|
unsigned UDomain = UseMI->getDesc().TSFlags & ARMII::DomainMask;
|
|
|
|
if (Subtarget.isCortexA8() &&
|
|
|
|
(DDomain == ARMII::DomainVFP || UDomain == ARMII::DomainVFP))
|
|
|
|
// Cortex-A8 VFP instructions are not pipelined.
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Hoist VFP / NEON instructions with 4 or higher latency.
|
2013-06-15 12:49:57 +08:00
|
|
|
int Latency = computeOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
|
2012-06-08 03:41:58 +08:00
|
|
|
if (Latency < 0)
|
|
|
|
Latency = getInstrLatency(ItinData, DefMI);
|
2010-10-20 02:58:51 +08:00
|
|
|
if (Latency <= 3)
|
|
|
|
return false;
|
|
|
|
return DDomain == ARMII::DomainVFP || DDomain == ARMII::DomainNEON ||
|
|
|
|
UDomain == ARMII::DomainVFP || UDomain == ARMII::DomainNEON;
|
|
|
|
}
|
2010-10-26 10:08:50 +08:00
|
|
|
|
|
|
|
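// A def is considered low latency when it is an integer-domain
// (DomainGeneral) instruction whose itinerary operand cycle is known and at
// most 2.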
bool ARMBaseInstrInfo::
|
|
|
|
hasLowDefLatency(const InstrItineraryData *ItinData,
|
|
|
|
const MachineInstr *DefMI, unsigned DefIdx) const {
|
|
|
|
if (!ItinData || ItinData->isEmpty())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned DDomain = DefMI->getDesc().TSFlags & ARMII::DomainMask;
|
|
|
|
if (DDomain == ARMII::DomainGeneral) {
|
|
|
|
unsigned DefClass = DefMI->getDesc().getSchedClass();
|
|
|
|
int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
|
|
|
|
return (DefCycle != -1 && DefCycle <= 2);
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
2010-12-06 06:04:16 +08:00
|
|
|
|
2011-09-21 10:20:46 +08:00
|
|
|
bool ARMBaseInstrInfo::verifyInstruction(const MachineInstr *MI,
|
|
|
|
StringRef &ErrInfo) const {
|
|
|
|
if (convertAddSubFlagsOpcode(MI->getOpcode())) {
|
|
|
|
ErrInfo = "Pseudo flag setting opcodes only exist in Selection DAG";
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-12-06 06:04:16 +08:00
|
|
|
bool
|
|
|
|
ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
|
|
|
|
unsigned &AddSubOpc,
|
|
|
|
bool &NegAcc, bool &HasLane) const {
|
|
|
|
DenseMap<unsigned, unsigned>::const_iterator I = MLxEntryMap.find(Opcode);
|
|
|
|
if (I == MLxEntryMap.end())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const ARM_MLxEntry &Entry = ARM_MLxTable[I->second];
|
|
|
|
MulOpc = Entry.MulOpc;
|
|
|
|
AddSubOpc = Entry.AddSubOpc;
|
|
|
|
NegAcc = Entry.NegAcc;
|
|
|
|
HasLane = Entry.HasLane;
|
|
|
|
return true;
|
|
|
|
}
|
2011-09-28 06:57:21 +08:00
|
|
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Execution domains.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// Some instructions go down the NEON pipeline, some go down the VFP pipeline,
|
|
|
|
// and some can go down both. The vmov instructions go down the VFP pipeline,
|
|
|
|
// but they can be changed to vorr equivalents that are executed by the NEON
|
|
|
|
// pipeline.
|
|
|
|
//
|
|
|
|
// We use the following execution domain numbering:
|
|
|
|
//
|
2011-09-29 10:48:41 +08:00
|
|
|
enum ARMExeDomain {
|
|
|
|
ExeGeneric = 0,
|
|
|
|
ExeVFP = 1,
|
|
|
|
ExeNEON = 2
|
|
|
|
};
|
2011-09-28 06:57:21 +08:00
|
|
|
//
|
|
|
|
// Also see ARMInstrFormats.td and Domain* enums in ARMBaseInfo.h
|
|
|
|
//
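// getExecutionDomain below returns a (current domain, possible domains)
// pair; the second member is a bitmask of domains the instruction may be
// rewritten into, and a mask of zero means it cannot be swizzled.
//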
|
|
|
|
std::pair<uint16_t, uint16_t>
|
|
|
|
ARMBaseInstrInfo::getExecutionDomain(const MachineInstr *MI) const {
|
2012-08-17 19:32:52 +08:00
|
|
|
// VMOVD, VMOVRS and VMOVSR are VFP instructions, but can be changed to NEON
|
|
|
|
// if they are not predicated.
|
2011-09-28 06:57:21 +08:00
|
|
|
if (MI->getOpcode() == ARM::VMOVD && !isPredicated(MI))
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));
|
2011-09-28 06:57:21 +08:00
|
|
|
|
2013-03-27 20:38:44 +08:00
|
|
|
// Cortex-A9 is particularly picky about mixing the two pipelines and wants these
|
2012-08-17 19:32:52 +08:00
|
|
|
// converted.
|
2013-03-27 20:38:44 +08:00
|
|
|
if (Subtarget.isCortexA9() && !isPredicated(MI) &&
|
2012-08-17 19:32:52 +08:00
|
|
|
(MI->getOpcode() == ARM::VMOVRS ||
|
2012-08-30 18:17:45 +08:00
|
|
|
MI->getOpcode() == ARM::VMOVSR ||
|
|
|
|
MI->getOpcode() == ARM::VMOVS))
|
2012-08-17 19:32:52 +08:00
|
|
|
return std::make_pair(ExeVFP, (1<<ExeVFP) | (1<<ExeNEON));
|
|
|
|
|
2011-09-28 06:57:21 +08:00
|
|
|
// No other instructions can be swizzled, so just determine their domain.
|
|
|
|
unsigned Domain = MI->getDesc().TSFlags & ARMII::DomainMask;
|
|
|
|
|
|
|
|
if (Domain & ARMII::DomainNEON)
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeNEON, 0);
|
2011-09-28 06:57:21 +08:00
|
|
|
|
|
|
|
// Certain instructions can go either way on Cortex-A8.
|
|
|
|
// Treat them as NEON instructions.
|
|
|
|
if ((Domain & ARMII::DomainNEONA8) && Subtarget.isCortexA8())
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeNEON, 0);
|
2011-09-28 06:57:21 +08:00
|
|
|
|
|
|
|
if (Domain & ARMII::DomainVFP)
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeVFP, 0);
|
2011-09-28 06:57:21 +08:00
|
|
|
|
2011-09-29 10:48:41 +08:00
|
|
|
return std::make_pair(ExeGeneric, 0);
|
2011-09-28 06:57:21 +08:00
|
|
|
}
|
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
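/// getCorrespondingDRegAndLane - Return the D register that contains SReg,
/// setting Lane to 0 or 1 depending on whether SReg is the low or high half
/// of that D register.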
static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI,
|
|
|
|
unsigned SReg, unsigned &Lane) {
|
|
|
|
unsigned DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
|
|
|
|
Lane = 0;
|
|
|
|
|
|
|
|
if (DReg != ARM::NoRegister)
|
|
|
|
return DReg;
|
|
|
|
|
|
|
|
Lane = 1;
|
|
|
|
DReg = TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
|
|
|
|
|
|
|
|
assert(DReg && "S-register with no D super-register?");
|
|
|
|
return DReg;
|
|
|
|
}
|
|
|
|
|
2012-10-10 13:43:01 +08:00
|
|
|
/// getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane,
|
2012-09-18 16:31:15 +08:00
|
|
|
/// set ImplicitSReg to a register number that must be marked as implicit-use or
|
|
|
|
/// zero if no register needs to be marked as implicit-use.
|
|
|
|
///
|
|
|
|
/// If the function cannot determine if an SPR should be marked implicit use or
|
|
|
|
/// not, it returns false.
|
|
|
|
///
|
|
|
|
/// This function handles cases where an instruction is being converted from taking
|
2012-10-10 13:43:01 +08:00
|
|
|
/// an SPR to taking a DPR[Lane]. A use of the DPR is being added, which may conflict
|
2012-09-18 16:31:15 +08:00
|
|
|
/// with an earlier def of an SPR corresponding to DPR[Lane^1] (i.e. the other
|
|
|
|
/// lane of the DPR).
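/// For example, rewriting a use of %S1 into a use of %D0[1] adds a read of
/// %D0; if %S0 (the other lane) was defined earlier and is live, that def
/// must stay visible via an implicit-use of %S0.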
|
|
|
|
///
|
|
|
|
/// If the other SPR is defined, an implicit-use of it should be added. Otherwise
|
|
|
|
/// (including the case where the DPR itself is defined), it should not.
|
2012-10-10 13:43:01 +08:00
|
|
|
///
|
2012-09-18 16:31:15 +08:00
|
|
|
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI,
|
|
|
|
MachineInstr *MI,
|
|
|
|
unsigned DReg, unsigned Lane,
|
|
|
|
unsigned &ImplicitSReg) {
|
|
|
|
// If the DPR is defined or used already, the other SPR lane will be chained
|
|
|
|
// correctly, so there is nothing to be done.
|
|
|
|
if (MI->definesRegister(DReg, TRI) || MI->readsRegister(DReg, TRI)) {
|
|
|
|
ImplicitSReg = 0;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise we need to go searching to see if the SPR is set explicitly.
|
|
|
|
ImplicitSReg = TRI->getSubReg(DReg,
|
|
|
|
(Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
|
|
|
|
MachineBasicBlock::LivenessQueryResult LQR =
|
|
|
|
MI->getParent()->computeRegisterLiveness(TRI, ImplicitSReg, MI);
|
|
|
|
|
|
|
|
if (LQR == MachineBasicBlock::LQR_Live)
|
|
|
|
return true;
|
|
|
|
else if (LQR == MachineBasicBlock::LQR_Unknown)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// If the register is known not to be live, there is no need to add an
|
|
|
|
// implicit-use.
|
|
|
|
ImplicitSReg = 0;
|
|
|
|
return true;
|
|
|
|
}
|
2012-08-30 00:36:07 +08:00
|
|
|
|
2011-09-28 06:57:21 +08:00
|
|
|
void
|
|
|
|
ARMBaseInstrInfo::setExecutionDomain(MachineInstr *MI, unsigned Domain) const {
|
2012-08-17 19:32:52 +08:00
|
|
|
unsigned DstReg, SrcReg, DReg;
|
|
|
|
unsigned Lane;
|
2012-12-20 05:31:56 +08:00
|
|
|
MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
|
2012-08-17 19:32:52 +08:00
|
|
|
const TargetRegisterInfo *TRI = &getRegisterInfo();
|
|
|
|
switch (MI->getOpcode()) {
|
|
|
|
default:
|
|
|
|
llvm_unreachable("cannot handle opcode!");
|
|
|
|
break;
|
|
|
|
case ARM::VMOVD:
|
|
|
|
if (Domain != ExeNEON)
|
|
|
|
break;
|
|
|
|
|
|
|
|
// Zap the predicate operands.
|
|
|
|
assert(!isPredicated(MI) && "Cannot predicate a VORRd");
|
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
// Source instruction is %DDst = VMOVD %DSrc, 14, %noreg (; implicits)
|
|
|
|
DstReg = MI->getOperand(0).getReg();
|
|
|
|
SrcReg = MI->getOperand(1).getReg();
|
2012-08-17 19:32:52 +08:00
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
|
|
|
|
MI->RemoveOperand(i-1);
|
|
|
|
|
|
|
|
// Change to a %DDst = VORRd %DSrc, %DSrc, 14, %noreg (; implicits)
|
|
|
|
MI->setDesc(get(ARM::VORRd));
|
|
|
|
AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
|
|
|
|
.addReg(SrcReg)
|
|
|
|
.addReg(SrcReg));
|
2012-08-17 19:32:52 +08:00
|
|
|
break;
|
|
|
|
case ARM::VMOVRS:
|
|
|
|
if (Domain != ExeNEON)
|
|
|
|
break;
|
|
|
|
assert(!isPredicated(MI) && "Cannot predicate a VGETLN");
|
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
// Source instruction is %RDst = VMOVRS %SSrc, 14, %noreg (; implicits)
|
2012-08-17 19:32:52 +08:00
|
|
|
DstReg = MI->getOperand(0).getReg();
|
|
|
|
SrcReg = MI->getOperand(1).getReg();
|
2011-09-28 06:57:21 +08:00
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
|
|
|
|
MI->RemoveOperand(i-1);
|
2012-08-17 19:32:52 +08:00
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
DReg = getCorrespondingDRegAndLane(TRI, SrcReg, Lane);
|
2012-08-17 19:32:52 +08:00
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
// Convert to %RDst = VGETLNi32 %DSrc, Lane, 14, %noreg (; imps)
|
|
|
|
// Note that DSrc has been widened and the other lane may be undef, which
|
|
|
|
// contaminates the entire register.
|
2012-08-17 19:32:52 +08:00
|
|
|
MI->setDesc(get(ARM::VGETLNi32));
|
2012-08-30 00:36:07 +08:00
|
|
|
AddDefaultPred(MIB.addReg(DstReg, RegState::Define)
|
|
|
|
.addReg(DReg, RegState::Undef)
|
|
|
|
.addImm(Lane));
|
2012-08-17 19:32:52 +08:00
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
// The old source should be an implicit use; otherwise we might think it
|
|
|
|
// was dead before here.
|
2012-08-17 19:32:52 +08:00
|
|
|
MIB.addReg(SrcReg, RegState::Implicit);
|
|
|
|
break;
|
2012-09-18 16:31:15 +08:00
|
|
|
case ARM::VMOVSR: {
|
2012-08-17 19:32:52 +08:00
|
|
|
if (Domain != ExeNEON)
|
|
|
|
break;
|
|
|
|
assert(!isPredicated(MI) && "Cannot predicate a VSETLN");
|
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
// Source instruction is %SDst = VMOVSR %RSrc, 14, %noreg (; implicits)
|
2012-08-17 19:32:52 +08:00
|
|
|
DstReg = MI->getOperand(0).getReg();
|
|
|
|
SrcReg = MI->getOperand(1).getReg();
|
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
DReg = getCorrespondingDRegAndLane(TRI, DstReg, Lane);
|
2012-08-17 19:32:52 +08:00
|
|
|
|
2012-09-18 16:31:15 +08:00
|
|
|
unsigned ImplicitSReg;
|
|
|
|
if (!getImplicitSPRUseForDPRUse(TRI, MI, DReg, Lane, ImplicitSReg))
|
|
|
|
break;
|
2012-09-02 02:07:29 +08:00
|
|
|
|
2012-09-06 02:37:53 +08:00
|
|
|
for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
|
|
|
|
MI->RemoveOperand(i-1);
|
|
|
|
|
2012-08-30 00:36:07 +08:00
|
|
|
// Convert to %DDst = VSETLNi32 %DDst, %RSrc, Lane, 14, %noreg (; imps)
|
|
|
|
// Again DDst may be undefined at the beginning of this instruction.
|
|
|
|
MI->setDesc(get(ARM::VSETLNi32));
|
2012-09-02 02:07:29 +08:00
|
|
|
MIB.addReg(DReg, RegState::Define)
|
|
|
|
.addReg(DReg, getUndefRegState(!MI->readsRegister(DReg, TRI)))
|
|
|
|
.addReg(SrcReg)
|
|
|
|
.addImm(Lane);
|
|
|
|
AddDefaultPred(MIB);
|
2012-08-30 18:17:45 +08:00
|
|
|
|
2012-09-02 02:07:29 +08:00
|
|
|
// The narrower destination must be marked as set to keep previous chains
|
|
|
|
// in place.
|
2012-08-30 00:36:07 +08:00
|
|
|
MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
|
2012-09-18 16:31:15 +08:00
|
|
|
if (ImplicitSReg != 0)
|
|
|
|
MIB.addReg(ImplicitSReg, RegState::Implicit);
|
2012-08-17 19:32:52 +08:00
|
|
|
break;
|
2012-09-18 16:31:15 +08:00
|
|
|
}
|
2012-08-30 18:17:45 +08:00
|
|
|
case ARM::VMOVS: {
|
|
|
|
if (Domain != ExeNEON)
|
|
|
|
break;
|
|
|
|
|
|
|
|
// Source instruction is %SDst = VMOVS %SSrc, 14, %noreg (; implicits)
|
|
|
|
DstReg = MI->getOperand(0).getReg();
|
|
|
|
SrcReg = MI->getOperand(1).getReg();
|
|
|
|
|
|
|
|
unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
|
|
|
|
DDst = getCorrespondingDRegAndLane(TRI, DstReg, DstLane);
|
|
|
|
DSrc = getCorrespondingDRegAndLane(TRI, SrcReg, SrcLane);
|
|
|
|
|
2012-09-18 16:31:15 +08:00
|
|
|
unsigned ImplicitSReg;
|
|
|
|
if (!getImplicitSPRUseForDPRUse(TRI, MI, DSrc, SrcLane, ImplicitSReg))
|
|
|
|
break;
|
2012-09-02 02:07:29 +08:00
|
|
|
|
2012-09-06 02:37:53 +08:00
|
|
|
for (unsigned i = MI->getDesc().getNumOperands(); i; --i)
|
|
|
|
MI->RemoveOperand(i-1);
|
|
|
|
|
2012-08-30 18:17:45 +08:00
|
|
|
if (DSrc == DDst) {
|
|
|
|
// Destination can be:
|
|
|
|
// %DDst = VDUPLN32d %DDst, Lane, 14, %noreg (; implicits)
|
|
|
|
MI->setDesc(get(ARM::VDUPLN32d));
|
2012-09-02 02:07:29 +08:00
|
|
|
MIB.addReg(DDst, RegState::Define)
|
|
|
|
.addReg(DDst, getUndefRegState(!MI->readsRegister(DDst, TRI)))
|
|
|
|
.addImm(SrcLane);
|
|
|
|
AddDefaultPred(MIB);
|
2012-08-30 18:17:45 +08:00
|
|
|
|
|
|
|
// Neither the source nor the destination is naturally represented any
|
|
|
|
// more, so add them in manually.
|
|
|
|
MIB.addReg(DstReg, RegState::Implicit | RegState::Define);
|
|
|
|
MIB.addReg(SrcReg, RegState::Implicit);
|
2012-09-18 16:31:15 +08:00
|
|
|
if (ImplicitSReg != 0)
|
|
|
|
MIB.addReg(ImplicitSReg, RegState::Implicit);
|
2012-08-30 18:17:45 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// In general there's no single instruction that can perform an S <-> S
|
|
|
|
// move in NEON space, but a pair of VEXT instructions *can* do the
|
|
|
|
// job. It turns out that the VEXTs needed will only use DSrc once, with
|
|
|
|
// the position based purely on the combination of lane-0 and lane-1
|
|
|
|
// involved. For example
|
|
|
|
// vmov s0, s2 -> vext.32 d0, d0, d1, #1 vext.32 d0, d0, d0, #1
|
|
|
|
// vmov s1, s3 -> vext.32 d0, d1, d0, #1 vext.32 d0, d0, d0, #1
|
|
|
|
// vmov s0, s3 -> vext.32 d0, d0, d0, #1 vext.32 d0, d1, d0, #1
|
|
|
|
// vmov s1, s2 -> vext.32 d0, d0, d0, #1 vext.32 d0, d0, d1, #1
|
|
|
|
//
|
|
|
|
// Pattern of the MachineInstrs is:
|
|
|
|
// %DDst = VEXTd32 %DSrc1, %DSrc2, Lane, 14, %noreg (;implicits)
|
|
|
|
MachineInstrBuilder NewMIB;
|
|
|
|
NewMIB = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
|
|
|
|
get(ARM::VEXTd32), DDst);
|
2012-09-02 02:07:29 +08:00
|
|
|
|
|
|
|
// On the first instruction, both DSrc and DDst may be <undef> if present,
|
|
|
|
// specifically when the original instruction didn't have them as an
|
|
|
|
// <imp-use>.
|
|
|
|
unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
|
|
|
|
bool CurUndef = !MI->readsRegister(CurReg, TRI);
|
|
|
|
NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
|
|
|
|
|
|
|
|
CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
|
|
|
|
CurUndef = !MI->readsRegister(CurReg, TRI);
|
|
|
|
NewMIB.addReg(CurReg, getUndefRegState(CurUndef));
|
|
|
|
|
2012-08-30 18:17:45 +08:00
|
|
|
NewMIB.addImm(1);
|
|
|
|
AddDefaultPred(NewMIB);
|
|
|
|
|
|
|
|
if (SrcLane == DstLane)
|
|
|
|
NewMIB.addReg(SrcReg, RegState::Implicit);
|
|
|
|
|
|
|
|
MI->setDesc(get(ARM::VEXTd32));
|
|
|
|
MIB.addReg(DDst, RegState::Define);
|
2012-09-02 02:07:29 +08:00
|
|
|
|
|
|
|
// On the second instruction, DDst has definitely been defined above, so
|
|
|
|
// it is not <undef>. DSrc, if present, can be <undef> as above.
|
|
|
|
CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
|
|
|
|
CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
|
|
|
|
MIB.addReg(CurReg, getUndefRegState(CurUndef));
|
|
|
|
|
|
|
|
CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
|
|
|
|
CurUndef = CurReg == DSrc && !MI->readsRegister(CurReg, TRI);
|
|
|
|
MIB.addReg(CurReg, getUndefRegState(CurUndef));
|
|
|
|
|
2012-08-30 18:17:45 +08:00
|
|
|
MIB.addImm(1);
|
|
|
|
AddDefaultPred(MIB);
|
|
|
|
|
|
|
|
if (SrcLane != DstLane)
|
|
|
|
MIB.addReg(SrcReg, RegState::Implicit);
|
|
|
|
|
|
|
|
// As before, the original destination is no longer represented; add it
|
|
|
|
// implicitly.
|
|
|
|
MIB.addReg(DstReg, RegState::Define | RegState::Implicit);
|
2012-09-18 16:31:15 +08:00
|
|
|
if (ImplicitSReg != 0)
|
|
|
|
MIB.addReg(ImplicitSReg, RegState::Implicit);
|
2012-08-30 18:17:45 +08:00
|
|
|
break;
|
|
|
|
}
|
2012-08-17 19:32:52 +08:00
|
|
|
}
|
2011-09-29 10:48:41 +08:00
|
|
|
|
2011-09-28 06:57:21 +08:00
|
|
|
}
|
2012-02-29 07:53:30 +08:00
|
|
|
|
2012-09-30 05:43:49 +08:00
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
// Partial register updates
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// Swift renames NEON registers with 64-bit granularity. That means any
|
|
|
|
// instruction writing an S-reg implicitly reads the containing D-reg. The
|
|
|
|
// problem is mostly avoided by translating f32 operations to v2f32 operations
|
|
|
|
// on D-registers, but f32 loads remain problematic.
|
|
|
|
//
|
|
|
|
// These instructions can load an f32 into a NEON register:
|
|
|
|
//
|
|
|
|
// VLDRS - Only writes S, partial D update.
|
|
|
|
// VLD1LNd32 - Writes all D-regs, explicit partial D update, 2 uops.
|
|
|
|
// VLD1DUPd32 - Writes all D-regs, no partial reg update, 2 uops.
|
|
|
|
//
|
|
|
|
// FCONSTD can be used as a dependency-breaking instruction.
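// A sketch of what the hooks below enable (assembly is illustrative only):
//   vldr s0, [r0]           ; writes S0 only -> merges into the stale D0
// becomes, after breakPartialRegDependency():
//   vmov.f64 d0, #0.5       ; FCONSTD: clobbers all of D0
//   vldr s0, [r0]           ; no false dependency on D0's last writer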
|
|
|
|
unsigned ARMBaseInstrInfo::
|
|
|
|
getPartialRegUpdateClearance(const MachineInstr *MI,
|
|
|
|
unsigned OpNum,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
2013-03-27 20:38:44 +08:00
|
|
|
if (!SwiftPartialUpdateClearance ||
|
|
|
|
!(Subtarget.isSwift() || Subtarget.isCortexA15()))
|
2012-09-30 05:43:49 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
assert(TRI && "Need TRI instance");
|
|
|
|
|
|
|
|
const MachineOperand &MO = MI->getOperand(OpNum);
|
|
|
|
if (MO.readsReg())
|
|
|
|
return 0;
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
int UseOp = -1;
|
|
|
|
|
|
|
|
switch(MI->getOpcode()) {
|
|
|
|
// Normal instructions writing only an S-register.
|
|
|
|
case ARM::VLDRS:
|
|
|
|
case ARM::FCONSTS:
|
|
|
|
case ARM::VMOVSR:
|
|
|
|
case ARM::VMOVv8i8:
|
|
|
|
case ARM::VMOVv4i16:
|
|
|
|
case ARM::VMOVv2i32:
|
|
|
|
case ARM::VMOVv2f32:
|
|
|
|
case ARM::VMOVv1i64:
|
|
|
|
UseOp = MI->findRegisterUseOperandIdx(Reg, false, TRI);
|
|
|
|
break;
|
|
|
|
|
|
|
|
// Explicitly reads the dependency.
|
|
|
|
case ARM::VLD1LNd32:
|
2013-03-27 20:38:44 +08:00
|
|
|
UseOp = 3;
|
2012-09-30 05:43:49 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this instruction actually reads a value from Reg, there is no unwanted
|
|
|
|
// dependency.
|
|
|
|
if (UseOp != -1 && MI->getOperand(UseOp).readsReg())
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
// We must be able to clobber the whole D-reg.
|
|
|
|
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
|
|
|
|
// Virtual register must be a foo:ssub_0<def,undef> operand.
|
|
|
|
if (!MO.getSubReg() || MI->readsVirtualRegister(Reg))
|
|
|
|
return 0;
|
|
|
|
} else if (ARM::SPRRegClass.contains(Reg)) {
|
|
|
|
// Physical register: MI must define the full D-reg.
|
|
|
|
unsigned DReg = TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
|
|
|
|
&ARM::DPRRegClass);
|
|
|
|
if (!DReg || !MI->definesRegister(DReg, TRI))
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
// MI has an unwanted D-register dependency.
|
|
|
|
// Avoid defs in the previous N instructions.
|
|
|
|
return SwiftPartialUpdateClearance;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Break a partial register dependency after getPartialRegUpdateClearance
|
|
|
|
// returned non-zero.
|
|
|
|
void ARMBaseInstrInfo::
|
|
|
|
breakPartialRegDependency(MachineBasicBlock::iterator MI,
|
|
|
|
unsigned OpNum,
|
|
|
|
const TargetRegisterInfo *TRI) const {
|
|
|
|
assert(MI && OpNum < MI->getDesc().getNumDefs() && "OpNum is not a def");
|
|
|
|
assert(TRI && "Need TRI instance");
|
|
|
|
|
|
|
|
const MachineOperand &MO = MI->getOperand(OpNum);
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
|
|
|
|
"Can't break virtual register dependencies.");
|
|
|
|
unsigned DReg = Reg;
|
|
|
|
|
|
|
|
// If MI defines an S-reg, find the corresponding D super-register.
|
|
|
|
if (ARM::SPRRegClass.contains(Reg)) {
|
|
|
|
DReg = ARM::D0 + (Reg - ARM::S0) / 2;
|
|
|
|
assert(TRI->isSuperRegister(Reg, DReg) && "Register enums broken");
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(ARM::DPRRegClass.contains(DReg) && "Can only break D-reg deps");
|
|
|
|
assert(MI->definesRegister(DReg, TRI) && "MI doesn't clobber full D-reg");
|
|
|
|
|
|
|
|
// FIXME: In some cases, VLDRS can be changed to a VLD1DUPd32 which defines
|
|
|
|
// the full D-register by loading the same value to both lanes. The
|
|
|
|
// instruction is micro-coded with 2 uops, so don't do this until we can
|
2013-09-14 17:34:24 +08:00
|
|
|
// properly schedule micro-coded instructions. The dispatcher stalls cause
|
2012-09-30 05:43:49 +08:00
|
|
|
// overly large regressions.
|
|
|
|
|
|
|
|
// Insert the dependency-breaking FCONSTD before MI.
|
|
|
|
// 96 is the encoding of 0.5, but the actual value doesn't matter here.
|
|
|
|
AddDefaultPred(BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
|
|
|
|
get(ARM::FCONSTD), DReg).addImm(96));
|
|
|
|
MI->addRegisterKilled(DReg, TRI, true);
|
|
|
|
}
|
|
|
|
|
2012-02-29 07:53:30 +08:00
|
|
|
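// The backend only assumes a real NOP encoding when the v6T2 feature is
// available, which is what the feature-bit test below expresses.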
bool ARMBaseInstrInfo::hasNOP() const {
|
|
|
|
return (Subtarget.getFeatureBits() & ARM::HasV6T2Ops) != 0;
|
|
|
|
}
|
2013-04-05 12:42:00 +08:00
|
|
|
|
|
|
|
bool ARMBaseInstrInfo::isSwiftFastImmShift(const MachineInstr *MI) const {
|
2013-06-05 22:59:36 +08:00
|
|
|
if (MI->getNumOperands() < 4)
|
|
|
|
return true;
|
2013-04-05 12:42:00 +08:00
|
|
|
unsigned ShOpVal = MI->getOperand(3).getImm();
|
|
|
|
unsigned ShImm = ARM_AM::getSORegOffset(ShOpVal);
|
|
|
|
// Swift supports faster shifts for: lsl 2, lsl 1, and lsr 1.
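// e.g. "add r0, r1, r2, lsl #2" is a fast shift here, while
// "add r0, r1, r2, lsl #3" or "add r0, r1, r2, asr #1" is not.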
|
|
|
|
if ((ShImm == 1 && ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsr) ||
|
|
|
|
((ShImm == 1 || ShImm == 2) &&
|
|
|
|
ARM_AM::getSORegShOp(ShOpVal) == ARM_AM::lsl))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|