[CodeGen][NFC] Make `TII::getMemOpBaseImmOfs` return a base operand
Currently, instructions that access memory through a base operand that is not a register cannot be analyzed using `TII::getMemOpBaseRegImmOfs`. This means that functions such as `TII::shouldClusterMemOps` will bail out on instructions using an FI as a base instead of a register. The goal of this patch is to refactor all this to return a base operand instead of a base register. Then, in a separate patch, I will add FI support to the mem op clustering in the MachineScheduler.

Differential Revision: https://reviews.llvm.org/D54846

llvm-svn: 347746
This commit is contained in:
parent dda6290f16
commit d7eebd6d83
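The diff below changes the target-independent hooks so that the base of a load/store is reported as a `MachineOperand` (which may be a register or a frame index) rather than a plain base register. As a rough illustration only (this sketch is not part of the commit, and `canClusterByBase` is a made-up helper name), a caller of the new interface might look like this:

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
using namespace llvm;

// Hypothetical caller of the new hook: the base comes back as a
// MachineOperand*, so it may be a register or a frame index (FI).
static bool canClusterByBase(const TargetInstrInfo *TII,
                             const TargetRegisterInfo *TRI,
                             MachineInstr &FirstMI, MachineInstr &SecondMI) {
  MachineOperand *BaseOp1, *BaseOp2;
  int64_t Offset1, Offset2;

  if (!TII->getMemOperandWithOffset(FirstMI, BaseOp1, Offset1, TRI) ||
      !TII->getMemOperandWithOffset(SecondMI, BaseOp2, Offset2, TRI))
    return false;

  // Callers that only understand register bases must now check the operand
  // kind explicitly instead of assuming a register.
  if (BaseOp1->isReg() && BaseOp2->isReg())
    return BaseOp1->getReg() == BaseOp2->getReg();

  // Frame-index bases are now representable; clustering on them is left to
  // the follow-up patch mentioned in the commit message.
  return false;
}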
@@ -1136,11 +1136,11 @@ public:
     return false;
   }
 
-  /// Get the base register and byte offset of an instruction that reads/writes
+  /// Get the base operand and byte offset of an instruction that reads/writes
   /// memory.
-  virtual bool getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
-                                     int64_t &Offset,
-                                     const TargetRegisterInfo *TRI) const {
+  virtual bool getMemOperandWithOffset(MachineInstr &MI,
+                                       MachineOperand *&BaseOp, int64_t &Offset,
+                                       const TargetRegisterInfo *TRI) const {
     return false;
   }
 
@@ -1164,8 +1164,8 @@ public:
   /// or
   /// DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
   /// to TargetPassConfig::createMachineScheduler() to have an effect.
-  virtual bool shouldClusterMemOps(MachineInstr &FirstLdSt, unsigned BaseReg1,
-                                   MachineInstr &SecondLdSt, unsigned BaseReg2,
+  virtual bool shouldClusterMemOps(MachineOperand &BaseOp1,
+                                   MachineOperand &BaseOp2,
                                    unsigned NumLoads) const {
     llvm_unreachable("target did not implement shouldClusterMemOps()");
   }
 
@@ -360,10 +360,10 @@ ImplicitNullChecks::SuitabilityResult
 ImplicitNullChecks::isSuitableMemoryOp(MachineInstr &MI, unsigned PointerReg,
                                        ArrayRef<MachineInstr *> PrevInsts) {
   int64_t Offset;
-  unsigned BaseReg;
+  MachineOperand *BaseOp;
 
-  if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI) ||
-      BaseReg != PointerReg)
+  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI) ||
+      !BaseOp->isReg() || BaseOp->getReg() != PointerReg)
     return SR_Unsuitable;
 
   // We want the mem access to be issued at a sane offset from PointerReg,
 
@@ -1121,11 +1121,12 @@ void SwingSchedulerDAG::addLoopCarriedDependences(AliasAnalysis *AA) {
       // First, perform the cheaper check that compares the base register.
      // If they are the same and the load offset is less than the store
      // offset, then mark the dependence as loop carried potentially.
-      unsigned BaseReg1, BaseReg2;
+      MachineOperand *BaseOp1, *BaseOp2;
      int64_t Offset1, Offset2;
-      if (TII->getMemOpBaseRegImmOfs(LdMI, BaseReg1, Offset1, TRI) &&
-          TII->getMemOpBaseRegImmOfs(MI, BaseReg2, Offset2, TRI)) {
-        if (BaseReg1 == BaseReg2 && (int)Offset1 < (int)Offset2) {
+      if (TII->getMemOperandWithOffset(LdMI, BaseOp1, Offset1, TRI) &&
+          TII->getMemOperandWithOffset(MI, BaseOp2, Offset2, TRI)) {
+        if (BaseOp1->isIdenticalTo(*BaseOp2) &&
+            (int)Offset1 < (int)Offset2) {
          assert(TII->areMemAccessesTriviallyDisjoint(LdMI, MI, AA) &&
                 "What happened to the chain edge?");
          SDep Dep(Load, SDep::Barrier);
@@ -3246,11 +3247,16 @@ void SwingSchedulerDAG::addBranches(MBBVectorTy &PrologBBs,
 /// during each iteration. Set Delta to the amount of the change.
 bool SwingSchedulerDAG::computeDelta(MachineInstr &MI, unsigned &Delta) {
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
-  unsigned BaseReg;
+  MachineOperand *BaseOp;
   int64_t Offset;
-  if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
+  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
     return false;
 
+  if (!BaseOp->isReg())
+    return false;
+
+  unsigned BaseReg = BaseOp->getReg();
+
   MachineRegisterInfo &MRI = MF.getRegInfo();
   // Check if there is a Phi. If so, get the definition in the loop.
   MachineInstr *BaseDef = MRI.getVRegDef(BaseReg);
@@ -3653,19 +3659,19 @@ bool SwingSchedulerDAG::isLoopCarriedDep(SUnit *Source, const SDep &Dep,
   if (!computeDelta(*SI, DeltaS) || !computeDelta(*DI, DeltaD))
     return true;
 
-  unsigned BaseRegS, BaseRegD;
+  MachineOperand *BaseOpS, *BaseOpD;
   int64_t OffsetS, OffsetD;
   const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
-  if (!TII->getMemOpBaseRegImmOfs(*SI, BaseRegS, OffsetS, TRI) ||
-      !TII->getMemOpBaseRegImmOfs(*DI, BaseRegD, OffsetD, TRI))
+  if (!TII->getMemOperandWithOffset(*SI, BaseOpS, OffsetS, TRI) ||
+      !TII->getMemOperandWithOffset(*DI, BaseOpD, OffsetD, TRI))
     return true;
 
-  if (BaseRegS != BaseRegD)
+  if (!BaseOpS->isIdenticalTo(*BaseOpD))
     return true;
 
   // Check that the base register is incremented by a constant value for each
   // iteration.
-  MachineInstr *Def = MRI.getVRegDef(BaseRegS);
+  MachineInstr *Def = MRI.getVRegDef(BaseOpS->getReg());
   if (!Def || !Def->isPHI())
     return true;
   unsigned InitVal = 0;
 
@@ -1483,15 +1483,15 @@ namespace {
 class BaseMemOpClusterMutation : public ScheduleDAGMutation {
   struct MemOpInfo {
     SUnit *SU;
-    unsigned BaseReg;
+    MachineOperand *BaseOp;
     int64_t Offset;
 
-    MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
-        : SU(su), BaseReg(reg), Offset(ofs) {}
+    MemOpInfo(SUnit *su, MachineOperand *Op, int64_t ofs)
+        : SU(su), BaseOp(Op), Offset(ofs) {}
 
-    bool operator<(const MemOpInfo&RHS) const {
-      return std::tie(BaseReg, Offset, SU->NodeNum) <
-             std::tie(RHS.BaseReg, RHS.Offset, RHS.SU->NodeNum);
+    bool operator<(const MemOpInfo &RHS) const {
+      return std::make_tuple(BaseOp->getReg(), Offset, SU->NodeNum) <
+             std::make_tuple(RHS.BaseOp->getReg(), RHS.Offset, RHS.SU->NodeNum);
     }
   };
 
@@ -1547,10 +1547,10 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
     ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
   SmallVector<MemOpInfo, 32> MemOpRecords;
   for (SUnit *SU : MemOps) {
-    unsigned BaseReg;
+    MachineOperand *BaseOp;
     int64_t Offset;
-    if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI))
-      MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
+    if (TII->getMemOperandWithOffset(*SU->getInstr(), BaseOp, Offset, TRI))
+      MemOpRecords.push_back(MemOpInfo(SU, BaseOp, Offset));
   }
   if (MemOpRecords.size() < 2)
     return;
@@ -1560,8 +1560,8 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
     SUnit *SUa = MemOpRecords[Idx].SU;
     SUnit *SUb = MemOpRecords[Idx+1].SU;
-    if (TII->shouldClusterMemOps(*SUa->getInstr(), MemOpRecords[Idx].BaseReg,
-                                 *SUb->getInstr(), MemOpRecords[Idx+1].BaseReg,
+    if (TII->shouldClusterMemOps(*MemOpRecords[Idx].BaseOp,
+                                 *MemOpRecords[Idx + 1].BaseOp,
                                  ClusterLength) &&
         DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
       LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
 
@@ -716,9 +716,12 @@ static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
       !PredBB->getTerminator()->getMetadata(LLVMContext::MD_make_implicit))
     return false;
 
-  unsigned BaseReg;
+  MachineOperand *BaseOp;
   int64_t Offset;
-  if (!TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI))
+  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
     return false;
 
+  if (!BaseOp->isReg())
+    return false;
+
   if (!(MI.mayLoad() && !MI.isPredicable()))
@@ -731,7 +734,7 @@ static bool SinkingPreventsImplicitNullCheck(MachineInstr &MI,
   return MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
          (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
           MBP.Predicate == MachineBranchPredicate::PRED_EQ) &&
-         MBP.LHS.getReg() == BaseReg;
+         MBP.LHS.getReg() == BaseOp->getReg();
 }
 
 /// Sink an instruction and its associated debug instructions. If the debug
 
@@ -1135,7 +1135,7 @@ bool AArch64InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
 bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
     MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
   const TargetRegisterInfo *TRI = &getRegisterInfo();
-  unsigned BaseRegA = 0, BaseRegB = 0;
+  MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
   int64_t OffsetA = 0, OffsetB = 0;
   unsigned WidthA = 0, WidthB = 0;
 
@@ -1146,14 +1146,14 @@ bool AArch64InstrInfo::areMemAccessesTriviallyDisjoint(
       MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
     return false;
 
-  // Retrieve the base register, offset from the base register and width. Width
+  // Retrieve the base register, offset from the base and width. Width
   // is the size of memory that is being loaded/stored (e.g. 1, 2, 4, 8). If
   // base registers are identical, and the offset of a lower memory access +
   // the width doesn't overlap the offset of a higher memory access,
   // then the memory accesses are different.
-  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
-      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
-    if (BaseRegA == BaseRegB) {
+  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
+      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
+    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
       int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
       int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
       int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
@@ -2042,10 +2042,13 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
 
   // Can't merge/pair if the instruction modifies the base register.
   // e.g., ldr x0, [x0]
-  unsigned BaseReg = MI.getOperand(1).getReg();
-  const TargetRegisterInfo *TRI = &getRegisterInfo();
-  if (MI.modifiesRegister(BaseReg, TRI))
-    return false;
+  // This case will never occur with an FI base.
+  if (MI.getOperand(1).isReg()) {
+    unsigned BaseReg = MI.getOperand(1).getReg();
+    const TargetRegisterInfo *TRI = &getRegisterInfo();
+    if (MI.modifiesRegister(BaseReg, TRI))
+      return false;
+  }
 
   // Check if this load/store has a hint to avoid pair formation.
   // MachineMemOperands hints are set by the AArch64StorePairSuppress pass.
@@ -2068,16 +2071,17 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(MachineInstr &MI) const {
   return true;
 }
 
-bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
-    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
-    const TargetRegisterInfo *TRI) const {
+bool AArch64InstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
+                                               MachineOperand *&BaseOp,
+                                               int64_t &Offset,
+                                               const TargetRegisterInfo *TRI) const {
   unsigned Width;
-  return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
+  return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
 }
 
-bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
-    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
-    const TargetRegisterInfo *TRI) const {
+bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
+    MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
+    unsigned &Width, const TargetRegisterInfo *TRI) const {
   assert(LdSt.mayLoadOrStore() && "Expected a memory operation.");
   // Handle only loads/stores with base register followed by immediate offset.
   if (LdSt.getNumExplicitOperands() == 3) {
@@ -2105,13 +2109,18 @@ bool AArch64InstrInfo::getMemOpBaseRegImmOfsWidth(
   // multiplied by the scaling factor. Unscaled instructions have scaling factor
   // set to 1.
   if (LdSt.getNumExplicitOperands() == 3) {
-    BaseReg = LdSt.getOperand(1).getReg();
+    BaseOp = &LdSt.getOperand(1);
     Offset = LdSt.getOperand(2).getImm() * Scale;
   } else {
     assert(LdSt.getNumExplicitOperands() == 4 && "invalid number of operands");
-    BaseReg = LdSt.getOperand(2).getReg();
+    BaseOp = &LdSt.getOperand(2);
     Offset = LdSt.getOperand(3).getImm() * Scale;
   }
 
+  assert(
+      BaseOp->isReg() &&
+      "getMemOperandWithOffset only supports base operands of type register.");
+
   return true;
 }
 
@@ -2322,13 +2331,19 @@ static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc) {
 
 /// Detect opportunities for ldp/stp formation.
 ///
-/// Only called for LdSt for which getMemOpBaseRegImmOfs returns true.
-bool AArch64InstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
-                                           unsigned BaseReg1,
-                                           MachineInstr &SecondLdSt,
-                                           unsigned BaseReg2,
+/// Only called for LdSt for which getMemOperandWithOffset returns true.
+bool AArch64InstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
                                            MachineOperand &BaseOp2,
                                            unsigned NumLoads) const {
-  if (BaseReg1 != BaseReg2)
+  MachineInstr &FirstLdSt = *BaseOp1.getParent();
+  MachineInstr &SecondLdSt = *BaseOp2.getParent();
+  if (BaseOp1.getType() != BaseOp2.getType())
    return false;
 
+  assert(BaseOp1.isReg() && "Only base registers are supported.");
+
+  // Check for base regs.
+  if (BaseOp1.isReg() && BaseOp1.getReg() != BaseOp2.getReg())
+    return false;
+
   // Only cluster up to a single pair.
@@ -5402,19 +5417,20 @@ AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
   // At this point, we have a stack instruction that we might need to fix
   // up. We'll handle it if it's a load or store.
   if (MI.mayLoadOrStore()) {
-    unsigned Base; // Filled with the base regiser of MI.
+    MachineOperand *Base; // Filled with the base operand of MI.
     int64_t Offset; // Filled with the offset of MI.
-    unsigned DummyWidth;
 
-    // Does it allow us to offset the base register and is the base SP?
-    if (!getMemOpBaseRegImmOfsWidth(MI, Base, Offset, DummyWidth, &RI) ||
-        Base != AArch64::SP)
+    // Does it allow us to offset the base operand and is the base the
+    // register SP?
+    if (!getMemOperandWithOffset(MI, Base, Offset, &RI) ||
+        !Base->isReg() || Base->getReg() != AArch64::SP)
       return outliner::InstrType::Illegal;
 
     // Find the minimum/maximum offset for this instruction and check if
     // fixing it up would be in range.
     int64_t MinOffset, MaxOffset; // Unscaled offsets for the instruction.
     unsigned Scale; // The scale to multiply the offsets by.
+    unsigned DummyWidth;
     getMemOpInfo(MI.getOpcode(), Scale, DummyWidth, MinOffset, MaxOffset);
 
     // TODO: We should really test what happens if an instruction overflows.
@@ -5439,13 +5455,14 @@ AArch64InstrInfo::getOutliningType(MachineBasicBlock::iterator &MIT,
 
 void AArch64InstrInfo::fixupPostOutline(MachineBasicBlock &MBB) const {
   for (MachineInstr &MI : MBB) {
-    unsigned Base, Width;
+    MachineOperand *Base;
+    unsigned Width;
     int64_t Offset;
 
     // Is this a load or store with an immediate offset with SP as the base?
     if (!MI.mayLoadOrStore() ||
-        !getMemOpBaseRegImmOfsWidth(MI, Base, Offset, Width, &RI) ||
-        Base != AArch64::SP)
+        !getMemOperandWithOffsetWidth(MI, Base, Offset, Width, &RI) ||
+        (Base->isReg() && Base->getReg() != AArch64::SP))
       continue;
 
     // It is, so we have to fix it up.
 
@@ -97,13 +97,13 @@ public:
   /// Hint that pairing the given load or store is unprofitable.
   static void suppressLdStPair(MachineInstr &MI);
 
-  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
-                             int64_t &Offset,
-                             const TargetRegisterInfo *TRI) const override;
+  bool getMemOperandWithOffset(MachineInstr &MI, MachineOperand *&BaseOp,
+                               int64_t &Offset,
+                               const TargetRegisterInfo *TRI) const override;
 
-  bool getMemOpBaseRegImmOfsWidth(MachineInstr &LdSt, unsigned &BaseReg,
-                                  int64_t &Offset, unsigned &Width,
-                                  const TargetRegisterInfo *TRI) const;
+  bool getMemOperandWithOffsetWidth(MachineInstr &MI, MachineOperand *&BaseOp,
+                                    int64_t &Offset, unsigned &Width,
+                                    const TargetRegisterInfo *TRI) const;
 
   /// Return the immediate offset of the base register in a load/store \p LdSt.
   MachineOperand &getMemOpBaseRegImmOfsOffsetOperand(MachineInstr &LdSt) const;
@@ -115,8 +115,7 @@ public:
   bool getMemOpInfo(unsigned Opcode, unsigned &Scale, unsigned &Width,
                     int64_t &MinOffset, int64_t &MaxOffset) const;
 
-  bool shouldClusterMemOps(MachineInstr &FirstLdSt, unsigned BaseReg1,
-                           MachineInstr &SecondLdSt, unsigned BaseReg2,
+  bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2,
                            unsigned NumLoads) const override;
 
   void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
 
@@ -148,9 +148,11 @@ bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
     for (auto &MI : MBB) {
       if (!isNarrowFPStore(MI))
         continue;
-      unsigned BaseReg;
+      MachineOperand *BaseOp;
       int64_t Offset;
-      if (TII->getMemOpBaseRegImmOfs(MI, BaseReg, Offset, TRI)) {
+      if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI) &&
+          BaseOp->isReg()) {
+        unsigned BaseReg = BaseOp->getReg();
         if (PrevBaseReg == BaseReg) {
           // If this block can take STPs, skip ahead to the next block.
           if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
 
@@ -265,9 +265,10 @@ static bool isStride64(unsigned Opc) {
   }
 }
 
-bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
-                                        int64_t &Offset,
-                                        const TargetRegisterInfo *TRI) const {
+bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
+                                          MachineOperand *&BaseOp,
+                                          int64_t &Offset,
+                                          const TargetRegisterInfo *TRI) const {
   unsigned Opc = LdSt.getOpcode();
 
   if (isDS(LdSt)) {
@@ -275,11 +276,10 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
         getNamedOperand(LdSt, AMDGPU::OpName::offset);
     if (OffsetImm) {
       // Normal, single offset LDS instruction.
-      const MachineOperand *AddrReg =
-          getNamedOperand(LdSt, AMDGPU::OpName::addr);
-
-      BaseReg = AddrReg->getReg();
+      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
       Offset = OffsetImm->getImm();
+      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
+                                "operands of type register.");
       return true;
     }
 
@@ -310,10 +310,10 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
     if (isStride64(Opc))
       EltSize *= 64;
 
-    const MachineOperand *AddrReg =
-        getNamedOperand(LdSt, AMDGPU::OpName::addr);
-    BaseReg = AddrReg->getReg();
+    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
     Offset = EltSize * Offset0;
+    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
+                              "operands of type register.");
     return true;
   }
 
@@ -325,19 +325,20 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
     if (SOffset && SOffset->isReg())
       return false;
 
-    const MachineOperand *AddrReg =
-        getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
+    MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
     if (!AddrReg)
      return false;
 
     const MachineOperand *OffsetImm =
         getNamedOperand(LdSt, AMDGPU::OpName::offset);
-    BaseReg = AddrReg->getReg();
+    BaseOp = AddrReg;
     Offset = OffsetImm->getImm();
 
     if (SOffset) // soffset can be an inline immediate.
       Offset += SOffset->getImm();
 
+    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
+                              "operands of type register.");
     return true;
   }
 
@@ -347,36 +348,46 @@ bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
     if (!OffsetImm)
       return false;
 
-    const MachineOperand *SBaseReg =
-        getNamedOperand(LdSt, AMDGPU::OpName::sbase);
-    BaseReg = SBaseReg->getReg();
+    MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
+    BaseOp = SBaseReg;
     Offset = OffsetImm->getImm();
+    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
+                              "operands of type register.");
     return true;
   }
 
   if (isFLAT(LdSt)) {
-    const MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
+    MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
     if (VAddr) {
       // Can't analyze 2 offsets.
      if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
        return false;
 
-      BaseReg = VAddr->getReg();
+      BaseOp = VAddr;
    } else {
      // scratch instructions have either vaddr or saddr.
-      BaseReg = getNamedOperand(LdSt, AMDGPU::OpName::saddr)->getReg();
+      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    }
 
     Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
+    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
+                              "operands of type register.");
     return true;
   }
 
   return false;
 }
 
-static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, unsigned BaseReg1,
-                                  const MachineInstr &MI2, unsigned BaseReg2) {
-  if (BaseReg1 == BaseReg2)
+static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
+                                  const MachineOperand &BaseOp1,
+                                  const MachineInstr &MI2,
+                                  const MachineOperand &BaseOp2) {
+  // Support only base operands with base registers.
+  // Note: this could be extended to support FI operands.
+  if (!BaseOp1.isReg() || !BaseOp2.isReg())
+    return false;
+
+  if (BaseOp1.isIdenticalTo(BaseOp2))
     return true;
 
   if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
@@ -402,12 +413,13 @@ static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, unsigned BaseReg1,
   return Base1 == Base2;
 }
 
-bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
-                                      unsigned BaseReg1,
-                                      MachineInstr &SecondLdSt,
-                                      unsigned BaseReg2,
+bool SIInstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
+                                      MachineOperand &BaseOp2,
                                       unsigned NumLoads) const {
-  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseReg1, SecondLdSt, BaseReg2))
+  MachineInstr &FirstLdSt = *BaseOp1.getParent();
+  MachineInstr &SecondLdSt = *BaseOp2.getParent();
+
+  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOp1, SecondLdSt, BaseOp2))
     return false;
 
   const MachineOperand *FirstDst = nullptr;
@@ -2162,11 +2174,13 @@ static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
 
 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
                                                MachineInstr &MIb) const {
-  unsigned BaseReg0, BaseReg1;
+  MachineOperand *BaseOp0, *BaseOp1;
   int64_t Offset0, Offset1;
 
-  if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
-      getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
+  if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) &&
+      getMemOperandWithOffset(MIb, BaseOp1, Offset1, &RI)) {
+    if (!BaseOp0->isIdenticalTo(*BaseOp1))
+      return false;
+
     if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
       // FIXME: Handle ds_read2 / ds_write2.
@@ -2174,8 +2188,7 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
     }
     unsigned Width0 = (*MIa.memoperands_begin())->getSize();
     unsigned Width1 = (*MIb.memoperands_begin())->getSize();
-    if (BaseReg0 == BaseReg1 &&
-        offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
+    if (offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
      return true;
    }
  }
 
@@ -164,12 +164,11 @@ public:
                                int64_t &Offset1,
                                int64_t &Offset2) const override;
 
-  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
-                             int64_t &Offset,
-                             const TargetRegisterInfo *TRI) const final;
+  bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+                               int64_t &Offset,
+                               const TargetRegisterInfo *TRI) const final;
 
-  bool shouldClusterMemOps(MachineInstr &FirstLdSt, unsigned BaseReg1,
-                           MachineInstr &SecondLdSt, unsigned BaseReg2,
+  bool shouldClusterMemOps(MachineOperand &BaseOp1, MachineOperand &BaseOp2,
                            unsigned NumLoads) const override;
 
   bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0,
 
@@ -1955,12 +1955,12 @@ void SIScheduleDAGMI::schedule()
 
   for (unsigned i = 0, e = (unsigned)SUnits.size(); i != e; ++i) {
     SUnit *SU = &SUnits[i];
-    unsigned BaseLatReg;
+    MachineOperand *BaseLatOp;
     int64_t OffLatReg;
     if (SITII->isLowLatencyInstruction(*SU->getInstr())) {
       IsLowLatencySU[i] = 1;
-      if (SITII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseLatReg, OffLatReg,
-                                       TRI))
+      if (SITII->getMemOperandWithOffset(*SU->getInstr(), BaseLatOp, OffLatReg,
+                                         TRI))
         LowLatencyOffset[i] = OffLatReg;
     } else if (SITII->isHighLatencyInstruction(*SU->getInstr()))
       IsHighLatencySU[i] = 1;
 
@@ -2894,14 +2894,15 @@ bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
 }
 
 /// Get the base register and byte offset of a load/store instr.
-bool HexagonInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt,
-      unsigned &BaseReg, int64_t &Offset, const TargetRegisterInfo *TRI)
-      const {
+bool HexagonInstrInfo::getMemOperandWithOffset(
+    MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
+    const TargetRegisterInfo *TRI) const {
   unsigned AccessSize = 0;
-  int OffsetVal = 0;
-  BaseReg = getBaseAndOffset(LdSt, OffsetVal, AccessSize);
-  Offset = OffsetVal;
-  return BaseReg != 0;
+  BaseOp = getBaseAndOffset(LdSt, Offset, AccessSize);
+  assert(!BaseOp || BaseOp->isReg() &&
+                        "getMemOperandWithOffset only supports base "
+                        "operands of type register.");
+  return BaseOp != nullptr;
 }
 
 /// Can these instructions execute at the same time in a bundle.
@@ -3108,21 +3109,22 @@ unsigned HexagonInstrInfo::getAddrMode(const MachineInstr &MI) const {
 
 // Returns the base register in a memory access (load/store). The offset is
 // returned in Offset and the access size is returned in AccessSize.
-// If the base register has a subregister or the offset field does not contain
-// an immediate value, return 0.
-unsigned HexagonInstrInfo::getBaseAndOffset(const MachineInstr &MI,
-      int &Offset, unsigned &AccessSize) const {
+// If the base operand has a subregister or the offset field does not contain
+// an immediate value, return nullptr.
+MachineOperand *HexagonInstrInfo::getBaseAndOffset(const MachineInstr &MI,
+                                                   int64_t &Offset,
+                                                   unsigned &AccessSize) const {
   // Return if it is not a base+offset type instruction or a MemOp.
   if (getAddrMode(MI) != HexagonII::BaseImmOffset &&
       getAddrMode(MI) != HexagonII::BaseLongOffset &&
       !isMemOp(MI) && !isPostIncrement(MI))
-    return 0;
+    return nullptr;
 
   AccessSize = getMemAccessSize(MI);
 
   unsigned BasePos = 0, OffsetPos = 0;
   if (!getBaseAndOffsetPosition(MI, BasePos, OffsetPos))
-    return 0;
+    return nullptr;
 
   // Post increment updates its EA after the mem access,
   // so we need to treat its offset as zero.
@@ -3131,14 +3133,14 @@ unsigned HexagonInstrInfo::getBaseAndOffset(const MachineInstr &MI,
   } else {
     const MachineOperand &OffsetOp = MI.getOperand(OffsetPos);
     if (!OffsetOp.isImm())
-      return 0;
+      return nullptr;
     Offset = OffsetOp.getImm();
   }
 
   const MachineOperand &BaseOp = MI.getOperand(BasePos);
   if (BaseOp.getSubReg() != 0)
-    return 0;
-  return BaseOp.getReg();
+    return nullptr;
+  return &const_cast<MachineOperand&>(BaseOp);
 }
 
 /// Return the position of the base and offset operands for this instruction.
 
@@ -216,9 +216,9 @@ public:
   bool expandPostRAPseudo(MachineInstr &MI) const override;
 
   /// Get the base register and byte offset of a load/store instr.
-  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
-                             int64_t &Offset,
-                             const TargetRegisterInfo *TRI) const override;
+  bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+                               int64_t &Offset,
+                               const TargetRegisterInfo *TRI) const override;
 
   /// Reverses the branch condition of the specified condition list,
   /// returning false on success and true if it cannot be reversed.
@@ -436,8 +436,8 @@ public:
   bool predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const;
 
   unsigned getAddrMode(const MachineInstr &MI) const;
-  unsigned getBaseAndOffset(const MachineInstr &MI, int &Offset,
-                            unsigned &AccessSize) const;
+  MachineOperand *getBaseAndOffset(const MachineInstr &MI, int64_t &Offset,
+                                   unsigned &AccessSize) const;
   SmallVector<MachineInstr*,2> getBranchingInstrs(MachineBasicBlock& MBB) const;
   unsigned getCExtOpNum(const MachineInstr &MI) const;
   HexagonII::CompoundGroup
 
@@ -275,11 +275,11 @@ void HexagonSubtarget::BankConflictMutation::apply(ScheduleDAGInstrs *DAG) {
    if (!L0.mayLoad() || L0.mayStore() ||
        HII.getAddrMode(L0) != HexagonII::BaseImmOffset)
      continue;
-    int Offset0;
+    int64_t Offset0;
    unsigned Size0;
-    unsigned Base0 = HII.getBaseAndOffset(L0, Offset0, Size0);
+    MachineOperand *BaseOp0 = HII.getBaseAndOffset(L0, Offset0, Size0);
    // Is the access size is longer than the L1 cache line, skip the check.
-    if (Base0 == 0 || Size0 >= 32)
+    if (BaseOp0 == nullptr || !BaseOp0->isReg() || Size0 >= 32)
      continue;
    // Scan only up to 32 instructions ahead (to avoid n^2 complexity).
    for (unsigned j = i+1, m = std::min(i+32, e); j != m; ++j) {
@@ -288,10 +288,11 @@ void HexagonSubtarget::BankConflictMutation::apply(ScheduleDAGInstrs *DAG) {
      if (!L1.mayLoad() || L1.mayStore() ||
          HII.getAddrMode(L1) != HexagonII::BaseImmOffset)
        continue;
-      int Offset1;
+      int64_t Offset1;
      unsigned Size1;
-      unsigned Base1 = HII.getBaseAndOffset(L1, Offset1, Size1);
-      if (Base1 == 0 || Size1 >= 32 || Base0 != Base1)
+      MachineOperand *BaseOp1 = HII.getBaseAndOffset(L1, Offset1, Size1);
+      if (BaseOp1 == nullptr || !BaseOp1->isReg() || Size1 >= 32 ||
+          BaseOp0->getReg() != BaseOp1->getReg())
        continue;
      // Check bits 3 and 4 of the offset: if they differ, a bank conflict
      // is unlikely.
 
@@ -101,12 +101,12 @@ bool LanaiInstrInfo::areMemAccessesTriviallyDisjoint(
   // the width doesn't overlap the offset of a higher memory access,
   // then the memory accesses are different.
   const TargetRegisterInfo *TRI = &getRegisterInfo();
-  unsigned BaseRegA = 0, BaseRegB = 0;
+  MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
   int64_t OffsetA = 0, OffsetB = 0;
   unsigned int WidthA = 0, WidthB = 0;
-  if (getMemOpBaseRegImmOfsWidth(MIa, BaseRegA, OffsetA, WidthA, TRI) &&
-      getMemOpBaseRegImmOfsWidth(MIb, BaseRegB, OffsetB, WidthB, TRI)) {
-    if (BaseRegA == BaseRegB) {
+  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
+      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
+    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
@@ -755,9 +755,9 @@ unsigned LanaiInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
   return 0;
 }
 
-bool LanaiInstrInfo::getMemOpBaseRegImmOfsWidth(
-    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width,
-    const TargetRegisterInfo * /*TRI*/) const {
+bool LanaiInstrInfo::getMemOperandWithOffsetWidth(
+    MachineInstr &LdSt, MachineOperand *&BaseOp, int64_t &Offset,
+    unsigned &Width, const TargetRegisterInfo * /*TRI*/) const {
   // Handle only loads/stores with base register followed by immediate offset
   // and with add as ALU op.
   if (LdSt.getNumOperands() != 4)
@@ -787,14 +787,17 @@ bool LanaiInstrInfo::getMemOpBaseRegImmOfsWidth(
     break;
   }
 
-  BaseReg = LdSt.getOperand(1).getReg();
+  BaseOp = &LdSt.getOperand(1);
   Offset = LdSt.getOperand(2).getImm();
+  assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
+                            "operands of type register.");
   return true;
 }
 
-bool LanaiInstrInfo::getMemOpBaseRegImmOfs(
-    MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset,
-    const TargetRegisterInfo *TRI) const {
+bool LanaiInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
+                                             MachineOperand *&BaseOp,
+                                             int64_t &Offset,
+                                             const TargetRegisterInfo *TRI) const {
   switch (LdSt.getOpcode()) {
   default:
     return false;
@@ -808,6 +811,6 @@ bool LanaiInstrInfo::getMemOpBaseRegImmOfs(
   case Lanai::LDBs_RI:
   case Lanai::LDBz_RI:
     unsigned Width;
-    return getMemOpBaseRegImmOfsWidth(LdSt, BaseReg, Offset, Width, TRI);
+    return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
   }
 }
 
@@ -68,13 +68,13 @@ public:
 
   bool expandPostRAPseudo(MachineInstr &MI) const override;
 
-  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
-                             int64_t &Offset,
-                             const TargetRegisterInfo *TRI) const override;
+  bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+                               int64_t &Offset,
+                               const TargetRegisterInfo *TRI) const override;
 
-  bool getMemOpBaseRegImmOfsWidth(MachineInstr &LdSt, unsigned &BaseReg,
-                                  int64_t &Offset, unsigned &Width,
-                                  const TargetRegisterInfo *TRI) const;
+  bool getMemOperandWithOffsetWidth(MachineInstr &LdSt, MachineOperand *&BaseOp,
+                                    int64_t &Offset, unsigned &Width,
+                                    const TargetRegisterInfo *TRI) const;
 
   std::pair<unsigned, unsigned>
   decomposeMachineOperandsTargetFlags(unsigned TF) const override;
 
@@ -3257,9 +3257,9 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
   }
 }
 
-bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
-                                         int64_t &Offset,
-                                         const TargetRegisterInfo *TRI) const {
+bool X86InstrInfo::getMemOperandWithOffset(
+    MachineInstr &MemOp, MachineOperand *&BaseOp, int64_t &Offset,
+    const TargetRegisterInfo *TRI) const {
   const MCInstrDesc &Desc = MemOp.getDesc();
   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
   if (MemRefBegin < 0)
@@ -3267,11 +3267,10 @@ bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
 
   MemRefBegin += X86II::getOperandBias(Desc);
 
-  MachineOperand &BaseMO = MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
-  if (!BaseMO.isReg()) // Can be an MO_FrameIndex
+  BaseOp = &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
+  if (!BaseOp->isReg()) // Can be an MO_FrameIndex
     return false;
 
-  BaseReg = BaseMO.getReg();
   if (MemOp.getOperand(MemRefBegin + X86::AddrScaleAmt).getImm() != 1)
     return false;
 
@@ -3287,6 +3286,8 @@ bool X86InstrInfo::getMemOpBaseRegImmOfs(MachineInstr &MemOp, unsigned &BaseReg,
 
   Offset = DispMO.getImm();
 
+  assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
+                            "operands of type register.");
   return true;
 }
 
@@ -327,9 +327,9 @@ public:
                      SmallVectorImpl<MachineOperand> &Cond,
                      bool AllowModify) const override;
 
-  bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
-                             int64_t &Offset,
-                             const TargetRegisterInfo *TRI) const override;
+  bool getMemOperandWithOffset(MachineInstr &LdSt, MachineOperand *&BaseOp,
+                               int64_t &Offset,
+                               const TargetRegisterInfo *TRI) const override;
   bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                               TargetInstrInfo::MachineBranchPredicate &MBP,
                               bool AllowModify = false) const override;