[MachineScheduler] Allow clustering mem ops with complex addresses

The generic BaseMemOpClusterMutation calls into TargetInstrInfo to
analyze the address of each load/store instruction, and again to decide
whether two instructions should be clustered. Previously this had to
represent each address as a single base operand plus a constant byte
offset. This patch extends it to support any number of base operands.
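
As a caller-side sketch (the hook, types, and MemOpInfo come from this
patch; the two-register address in the comment is a hypothetical
example):

  SmallVector<const MachineOperand *, 4> BaseOps;
  int64_t Offset;
  // The address is decomposed as the sum of all returned base operands
  // plus the constant Offset: for a load from [reg1 + reg2 + 16] a target
  // could report BaseOps = {&reg1, &reg2} and Offset = 16.
  if (TII->getMemOperandsWithOffset(MI, BaseOps, Offset, TRI))
    MemOpRecords.push_back(MemOpInfo(SU, BaseOps, Offset));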

The old target hook getMemOperandWithOffset is now a convenience
function for callers that are only prepared to handle a single base
operand. It calls the new, more general target hook
getMemOperandsWithOffset.
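
For example, a single-base caller keeps working unchanged (a minimal
sketch; the surrounding control flow is illustrative):

  const MachineOperand *BaseOp;
  int64_t Offset;
  // The wrapper succeeds only when the address reduces to exactly one
  // base operand; for multi-base addresses it conservatively returns
  // false, so existing callers never see the new representation.
  if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
    return false;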

The only requirements for the base operands returned by
getMemOperandsWithOffset are:
- they can be sorted by MemOpInfo::Compare, such that clusterable ops
  get sorted next to each other, and
- shouldClusterMemOps knows what they mean (see the sketch after this
  list).
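
A hedged sketch of what a target override might look like once its
getMemOperandsWithOffset returns more than one base operand
(MyTargetInstrInfo and the clustering heuristic are hypothetical, not
part of this patch):

  bool MyTargetInstrInfo::shouldClusterMemOps(
      ArrayRef<const MachineOperand *> BaseOps1,
      ArrayRef<const MachineOperand *> BaseOps2, unsigned NumLoads) const {
    // Cluster only if both addresses decompose the same way and every
    // corresponding base register matches.
    if (BaseOps1.size() != BaseOps2.size())
      return false;
    for (unsigned I = 0, E = BaseOps1.size(); I != E; ++I)
      if (!BaseOps1[I]->isReg() || !BaseOps2[I]->isReg() ||
          BaseOps1[I]->getReg() != BaseOps2[I]->getReg())
        return false;
    return NumLoads <= 4; // Hypothetical target-specific cluster limit.
  }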

One simple follow-on is to enable clustering of AMDGPU FLAT instructions
with both vaddr and saddr (base register + offset register). I've left
a FIXME in the code for this case.
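
A rough sketch of that follow-on (not implemented in this patch;
getNamedOperand and the AMDGPU operand names are real identifiers from
SIInstrInfo, while returning both registers is the assumption):

  const MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
  const MachineOperand *SAddr = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
  if (VAddr && SAddr) {
    // Report both registers as base operands instead of bailing out
    // with "can't analyze 2 offsets".
    BaseOps.push_back(VAddr);
    BaseOps.push_back(SAddr);
    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    return true;
  }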

Differential Revision: https://reviews.llvm.org/D71655
Author: Jay Foad
Date:   2020-01-06 11:22:51 +00:00
Parent: 70096ca111
Commit: e0f0d0e55c

13 changed files with 143 additions and 90 deletions

llvm/include/llvm/CodeGen/TargetInstrInfo.h

@@ -1238,15 +1238,21 @@ public:
   }
 
   /// Get the base operand and byte offset of an instruction that reads/writes
+  /// memory. This is a convenience function for callers that are only prepared
+  /// to handle a single base operand.
+  bool getMemOperandWithOffset(const MachineInstr &MI,
+                               const MachineOperand *&BaseOp, int64_t &Offset,
+                               const TargetRegisterInfo *TRI) const;
+
+  /// Get the base operands and byte offset of an instruction that reads/writes
   /// memory.
   /// It returns false if MI does not read/write memory.
-  /// It returns false if no base operand and offset was found.
-  /// It is not guaranteed to always recognize base operand and offsets in all
+  /// It returns false if no base operands and offset was found.
+  /// It is not guaranteed to always recognize base operands and offsets in all
   /// cases.
-  virtual bool getMemOperandWithOffset(const MachineInstr &MI,
-                                       const MachineOperand *&BaseOp,
-                                       int64_t &Offset,
-                                       const TargetRegisterInfo *TRI) const {
+  virtual bool getMemOperandsWithOffset(
+      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
+      int64_t &Offset, const TargetRegisterInfo *TRI) const {
     return false;
   }
@@ -1270,8 +1276,8 @@ public:
   /// or
   ///   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
   /// to TargetPassConfig::createMachineScheduler() to have an effect.
-  virtual bool shouldClusterMemOps(const MachineOperand &BaseOp1,
-                                   const MachineOperand &BaseOp2,
+  virtual bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
+                                   ArrayRef<const MachineOperand *> BaseOps2,
                                    unsigned NumLoads) const {
     llvm_unreachable("target did not implement shouldClusterMemOps()");
   }

llvm/lib/CodeGen/MachineScheduler.cpp

@ -1471,41 +1471,46 @@ namespace {
class BaseMemOpClusterMutation : public ScheduleDAGMutation { class BaseMemOpClusterMutation : public ScheduleDAGMutation {
struct MemOpInfo { struct MemOpInfo {
SUnit *SU; SUnit *SU;
const MachineOperand *BaseOp; SmallVector<const MachineOperand *, 4> BaseOps;
int64_t Offset; int64_t Offset;
MemOpInfo(SUnit *su, const MachineOperand *Op, int64_t ofs) MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
: SU(su), BaseOp(Op), Offset(ofs) {} int64_t Offset)
: SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset) {}
bool operator<(const MemOpInfo &RHS) const { static bool Compare(const MachineOperand *const &A,
if (BaseOp->getType() != RHS.BaseOp->getType()) const MachineOperand *const &B) {
return BaseOp->getType() < RHS.BaseOp->getType(); if (A->getType() != B->getType())
return A->getType() < B->getType();
if (BaseOp->isReg()) if (A->isReg())
return std::make_tuple(BaseOp->getReg(), Offset, SU->NodeNum) < return A->getReg() < B->getReg();
std::make_tuple(RHS.BaseOp->getReg(), RHS.Offset, if (A->isFI()) {
RHS.SU->NodeNum); const MachineFunction &MF = *A->getParent()->getParent()->getParent();
if (BaseOp->isFI()) {
const MachineFunction &MF =
*BaseOp->getParent()->getParent()->getParent();
const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering(); const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
bool StackGrowsDown = TFI.getStackGrowthDirection() == bool StackGrowsDown = TFI.getStackGrowthDirection() ==
TargetFrameLowering::StackGrowsDown; TargetFrameLowering::StackGrowsDown;
// Can't use tuple comparison here since we might need to use a return StackGrowsDown ? A->getIndex() > B->getIndex()
// different order when the stack grows down. : A->getIndex() < B->getIndex();
if (BaseOp->getIndex() != RHS.BaseOp->getIndex())
return StackGrowsDown ? BaseOp->getIndex() > RHS.BaseOp->getIndex()
: BaseOp->getIndex() < RHS.BaseOp->getIndex();
if (Offset != RHS.Offset)
return Offset < RHS.Offset;
return SU->NodeNum < RHS.SU->NodeNum;
} }
llvm_unreachable("MemOpClusterMutation only supports register or frame " llvm_unreachable("MemOpClusterMutation only supports register or frame "
"index bases."); "index bases.");
} }
bool operator<(const MemOpInfo &RHS) const {
// FIXME: Don't compare everything twice. Maybe use C++20 three way
// comparison instead when it's available.
if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(),
RHS.BaseOps.begin(), RHS.BaseOps.end(),
Compare))
return true;
if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(),
BaseOps.begin(), BaseOps.end(), Compare))
return false;
if (Offset != RHS.Offset)
return Offset < RHS.Offset;
return SU->NodeNum < RHS.SU->NodeNum;
}
}; };
const TargetInstrInfo *TII; const TargetInstrInfo *TII;
@ -1560,10 +1565,14 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
ArrayRef<SUnit *> MemOps, ScheduleDAGInstrs *DAG) { ArrayRef<SUnit *> MemOps, ScheduleDAGInstrs *DAG) {
SmallVector<MemOpInfo, 32> MemOpRecords; SmallVector<MemOpInfo, 32> MemOpRecords;
for (SUnit *SU : MemOps) { for (SUnit *SU : MemOps) {
const MachineOperand *BaseOp; SmallVector<const MachineOperand *, 4> BaseOps;
int64_t Offset; int64_t Offset;
if (TII->getMemOperandWithOffset(*SU->getInstr(), BaseOp, Offset, TRI)) if (TII->getMemOperandsWithOffset(*SU->getInstr(), BaseOps, Offset, TRI))
MemOpRecords.push_back(MemOpInfo(SU, BaseOp, Offset)); MemOpRecords.push_back(MemOpInfo(SU, BaseOps, Offset));
#ifndef NDEBUG
for (auto *Op : BaseOps)
assert(Op);
#endif
} }
if (MemOpRecords.size() < 2) if (MemOpRecords.size() < 2)
return; return;
@ -1573,8 +1582,8 @@ void BaseMemOpClusterMutation::clusterNeighboringMemOps(
for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) { for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
SUnit *SUa = MemOpRecords[Idx].SU; SUnit *SUa = MemOpRecords[Idx].SU;
SUnit *SUb = MemOpRecords[Idx+1].SU; SUnit *SUb = MemOpRecords[Idx+1].SU;
if (TII->shouldClusterMemOps(*MemOpRecords[Idx].BaseOp, if (TII->shouldClusterMemOps(MemOpRecords[Idx].BaseOps,
*MemOpRecords[Idx + 1].BaseOp, MemOpRecords[Idx + 1].BaseOps,
ClusterLength)) { ClusterLength)) {
if (SUa->NodeNum > SUb->NodeNum) if (SUa->NodeNum > SUb->NodeNum)
std::swap(SUa, SUb); std::swap(SUa, SUb);

llvm/lib/CodeGen/TargetInstrInfo.cpp

@@ -1028,6 +1028,18 @@ CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
   return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
 }
 
+// Default implementation of getMemOperandWithOffset.
+bool TargetInstrInfo::getMemOperandWithOffset(
+    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
+    const TargetRegisterInfo *TRI) const {
+  SmallVector<const MachineOperand *, 4> BaseOps;
+  if (!getMemOperandsWithOffset(MI, BaseOps, Offset, TRI) ||
+      BaseOps.size() != 1)
+    return false;
+  BaseOp = BaseOps.front();
+  return true;
+}
+
 //===----------------------------------------------------------------------===//
 //  SelectionDAG latency interface.
 //===----------------------------------------------------------------------===//

llvm/lib/Target/AArch64/AArch64InstrInfo.cpp

@@ -1985,15 +1985,18 @@ bool AArch64InstrInfo::isCandidateToMergeOrPair(const MachineInstr &MI) const {
   return true;
 }
 
-bool AArch64InstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
-                                               const MachineOperand *&BaseOp,
-                                               int64_t &Offset,
-                                               const TargetRegisterInfo *TRI) const {
+bool AArch64InstrInfo::getMemOperandsWithOffset(
+    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
+    int64_t &Offset, const TargetRegisterInfo *TRI) const {
   if (!LdSt.mayLoadOrStore())
     return false;
 
+  const MachineOperand *BaseOp;
   unsigned Width;
-  return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
+  if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
+    return false;
+  BaseOps.push_back(BaseOp);
+  return true;
 }
 
 bool AArch64InstrInfo::getMemOperandWithOffsetWidth(
@@ -2370,9 +2373,12 @@ static bool shouldClusterFI(const MachineFrameInfo &MFI, int FI1,
 /// Detect opportunities for ldp/stp formation.
 ///
 /// Only called for LdSt for which getMemOperandWithOffset returns true.
-bool AArch64InstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
-                                           const MachineOperand &BaseOp2,
-                                           unsigned NumLoads) const {
+bool AArch64InstrInfo::shouldClusterMemOps(
+    ArrayRef<const MachineOperand *> BaseOps1,
+    ArrayRef<const MachineOperand *> BaseOps2, unsigned NumLoads) const {
+  assert(BaseOps1.size() == 1 && BaseOps2.size() == 1);
+  const MachineOperand &BaseOp1 = *BaseOps1.front();
+  const MachineOperand &BaseOp2 = *BaseOps2.front();
   const MachineInstr &FirstLdSt = *BaseOp1.getParent();
   const MachineInstr &SecondLdSt = *BaseOp2.getParent();
   if (BaseOp1.getType() != BaseOp2.getType())

llvm/lib/Target/AArch64/AArch64InstrInfo.h

@@ -112,10 +112,9 @@ public:
   /// Hint that pairing the given load or store is unprofitable.
   static void suppressLdStPair(MachineInstr &MI);
 
-  bool getMemOperandWithOffset(const MachineInstr &MI,
-                               const MachineOperand *&BaseOp,
-                               int64_t &Offset,
-                               const TargetRegisterInfo *TRI) const override;
+  bool getMemOperandsWithOffset(
+      const MachineInstr &MI, SmallVectorImpl<const MachineOperand *> &BaseOps,
+      int64_t &Offset, const TargetRegisterInfo *TRI) const override;
 
   bool getMemOperandWithOffsetWidth(const MachineInstr &MI,
                                     const MachineOperand *&BaseOp,
@@ -132,8 +131,8 @@ public:
   static bool getMemOpInfo(unsigned Opcode, unsigned &Scale, unsigned &Width,
                            int64_t &MinOffset, int64_t &MaxOffset);
 
-  bool shouldClusterMemOps(const MachineOperand &BaseOp1,
-                           const MachineOperand &BaseOp2,
+  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
+                           ArrayRef<const MachineOperand *> BaseOps2,
                            unsigned NumLoads) const override;
 
   void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,

llvm/lib/Target/AMDGPU/SIInstrInfo.cpp

@@ -258,14 +258,14 @@ static bool isStride64(unsigned Opc) {
   }
 }
 
-bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
-                                          const MachineOperand *&BaseOp,
-                                          int64_t &Offset,
-                                          const TargetRegisterInfo *TRI) const {
+bool SIInstrInfo::getMemOperandsWithOffset(
+    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
+    int64_t &Offset, const TargetRegisterInfo *TRI) const {
   if (!LdSt.mayLoadOrStore())
     return false;
 
   unsigned Opc = LdSt.getOpcode();
+  const MachineOperand *BaseOp;
 
   if (isDS(LdSt)) {
     const MachineOperand *OffsetImm =
@@ -278,6 +278,7 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
       if (!BaseOp || !BaseOp->isReg())
         return false;
 
+      BaseOps.push_back(BaseOp);
       Offset = OffsetImm->getImm();
 
       return true;
@@ -314,6 +315,7 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
       if (!BaseOp->isReg())
         return false;
 
+      BaseOps.push_back(BaseOp);
       Offset = EltSize * Offset0;
 
       return true;
@@ -339,7 +341,7 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
     const MachineOperand *OffsetImm =
         getNamedOperand(LdSt, AMDGPU::OpName::offset);
-    BaseOp = SOffset;
+    BaseOps.push_back(SOffset);
     Offset = OffsetImm->getImm();
     return true;
   }
@@ -358,6 +360,7 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
     if (!BaseOp->isReg())
       return false;
 
+    BaseOps.push_back(BaseOp);
     return true;
   }
@@ -373,6 +376,7 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
     if (!BaseOp->isReg())
      return false;
 
+    BaseOps.push_back(BaseOp);
     return true;
   }
@@ -380,6 +384,7 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
   const MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
   if (VAddr) {
     // Can't analyze 2 offsets.
+    // FIXME remove this restriction!
     if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
       return false;
@@ -392,6 +397,7 @@ bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
     Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
     if (!BaseOp->isReg())
       return false;
 
+    BaseOps.push_back(BaseOp);
     return true;
   }
@@ -433,9 +439,12 @@ static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
   return Base1 == Base2;
 }
 
-bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
-                                      const MachineOperand &BaseOp2,
-                                      unsigned NumLoads) const {
+bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
+                                      ArrayRef<const MachineOperand *> BaseOps2,
+                                      unsigned NumLoads) const {
+  assert(BaseOps1.size() == 1 && BaseOps2.size() == 1);
+  const MachineOperand &BaseOp1 = *BaseOps1.front();
+  const MachineOperand &BaseOp2 = *BaseOps2.front();
   const MachineInstr &FirstLdSt = *BaseOp1.getParent();
   const MachineInstr &SecondLdSt = *BaseOp2.getParent();

llvm/lib/Target/AMDGPU/SIInstrInfo.h

@@ -181,13 +181,14 @@ public:
                                int64_t &Offset1,
                                int64_t &Offset2) const override;
 
-  bool getMemOperandWithOffset(const MachineInstr &LdSt,
-                               const MachineOperand *&BaseOp,
-                               int64_t &Offset,
-                               const TargetRegisterInfo *TRI) const final;
+  bool
+  getMemOperandsWithOffset(const MachineInstr &LdSt,
+                           SmallVectorImpl<const MachineOperand *> &BaseOps,
+                           int64_t &Offset,
+                           const TargetRegisterInfo *TRI) const final;
 
-  bool shouldClusterMemOps(const MachineOperand &BaseOp1,
-                           const MachineOperand &BaseOp2,
+  bool shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
+                           ArrayRef<const MachineOperand *> BaseOps2,
                            unsigned NumLoads) const override;
 
   bool shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, int64_t Offset0,

llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp

@@ -2946,12 +2946,15 @@ bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr &MI1,
 }
 
 /// Get the base register and byte offset of a load/store instr.
-bool HexagonInstrInfo::getMemOperandWithOffset(
-    const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset,
-    const TargetRegisterInfo *TRI) const {
+bool HexagonInstrInfo::getMemOperandsWithOffset(
+    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
+    int64_t &Offset, const TargetRegisterInfo *TRI) const {
   unsigned AccessSize = 0;
-  BaseOp = getBaseAndOffset(LdSt, Offset, AccessSize);
-  return BaseOp != nullptr && BaseOp->isReg();
+  const MachineOperand *BaseOp = getBaseAndOffset(LdSt, Offset, AccessSize);
+  if (!BaseOp || !BaseOp->isReg())
+    return false;
+  BaseOps.push_back(BaseOp);
+  return true;
 }
 
 /// Can these instructions execute at the same time in a bundle.

llvm/lib/Target/Hexagon/HexagonInstrInfo.h

@@ -204,10 +204,11 @@ public:
   bool expandPostRAPseudo(MachineInstr &MI) const override;
 
   /// Get the base register and byte offset of a load/store instr.
-  bool getMemOperandWithOffset(const MachineInstr &LdSt,
-                               const MachineOperand *&BaseOp,
-                               int64_t &Offset,
-                               const TargetRegisterInfo *TRI) const override;
+  bool
+  getMemOperandsWithOffset(const MachineInstr &LdSt,
+                           SmallVectorImpl<const MachineOperand *> &BaseOps,
+                           int64_t &Offset,
+                           const TargetRegisterInfo *TRI) const override;
 
   /// Reverses the branch condition of the specified condition list,
   /// returning false on success and true if it cannot be reversed.

llvm/lib/Target/Lanai/LanaiInstrInfo.cpp

@@ -795,10 +795,9 @@ bool LanaiInstrInfo::getMemOperandWithOffsetWidth(
   return true;
 }
 
-bool LanaiInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
-                                             const MachineOperand *&BaseOp,
-                                             int64_t &Offset,
-                                             const TargetRegisterInfo *TRI) const {
+bool LanaiInstrInfo::getMemOperandsWithOffset(
+    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
+    int64_t &Offset, const TargetRegisterInfo *TRI) const {
   switch (LdSt.getOpcode()) {
   default:
     return false;
@@ -811,7 +810,11 @@ bool LanaiInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
   case Lanai::STH_RI:
   case Lanai::LDBs_RI:
   case Lanai::LDBz_RI:
+    const MachineOperand *BaseOp;
     unsigned Width;
-    return getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI);
+    if (!getMemOperandWithOffsetWidth(LdSt, BaseOp, Offset, Width, TRI))
+      return false;
+    BaseOps.push_back(BaseOp);
+    return true;
   }
 }

llvm/lib/Target/Lanai/LanaiInstrInfo.h

@@ -67,10 +67,11 @@ public:
   bool expandPostRAPseudo(MachineInstr &MI) const override;
 
-  bool getMemOperandWithOffset(const MachineInstr &LdSt,
-                               const MachineOperand *&BaseOp,
-                               int64_t &Offset,
-                               const TargetRegisterInfo *TRI) const override;
+  bool
+  getMemOperandsWithOffset(const MachineInstr &LdSt,
+                           SmallVectorImpl<const MachineOperand *> &BaseOps,
+                           int64_t &Offset,
+                           const TargetRegisterInfo *TRI) const override;
 
   bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt,
                                     const MachineOperand *&BaseOp,

llvm/lib/Target/X86/X86InstrInfo.cpp

@@ -3189,9 +3189,9 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
   }
 }
 
-bool X86InstrInfo::getMemOperandWithOffset(
-    const MachineInstr &MemOp, const MachineOperand *&BaseOp, int64_t &Offset,
-    const TargetRegisterInfo *TRI) const {
+bool X86InstrInfo::getMemOperandsWithOffset(
+    const MachineInstr &MemOp, SmallVectorImpl<const MachineOperand *> &BaseOps,
+    int64_t &Offset, const TargetRegisterInfo *TRI) const {
   const MCInstrDesc &Desc = MemOp.getDesc();
   int MemRefBegin = X86II::getMemoryOperandNo(Desc.TSFlags);
   if (MemRefBegin < 0)
@@ -3199,7 +3199,8 @@ bool X86InstrInfo::getMemOperandWithOffset(
   MemRefBegin += X86II::getOperandBias(Desc);
 
-  BaseOp = &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
+  const MachineOperand *BaseOp =
+      &MemOp.getOperand(MemRefBegin + X86::AddrBaseReg);
   if (!BaseOp->isReg()) // Can be an MO_FrameIndex
     return false;
@@ -3221,6 +3222,7 @@ bool X86InstrInfo::getMemOperandWithOffset(
   if (!BaseOp->isReg())
     return false;
 
+  BaseOps.push_back(BaseOp);
   return true;
 }

llvm/lib/Target/X86/X86InstrInfo.h

@@ -291,10 +291,11 @@ public:
                      SmallVectorImpl<MachineOperand> &Cond,
                      bool AllowModify) const override;
 
-  bool getMemOperandWithOffset(const MachineInstr &LdSt,
-                               const MachineOperand *&BaseOp,
-                               int64_t &Offset,
-                               const TargetRegisterInfo *TRI) const override;
+  bool
+  getMemOperandsWithOffset(const MachineInstr &LdSt,
+                           SmallVectorImpl<const MachineOperand *> &BaseOps,
+                           int64_t &Offset,
+                           const TargetRegisterInfo *TRI) const override;
 
   bool analyzeBranchPredicate(MachineBasicBlock &MBB,
                               TargetInstrInfo::MachineBranchPredicate &MBP,
                               bool AllowModify = false) const override;