forked from OSchip/llvm-project
Make more use of MachineInstr::mayLoadOrStore.
parent bbcf1c3496
commit c5c935ab66
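Every pair of -/+ lines below makes the same mechanical substitution: an explicit "mayLoad() || mayStore()" disjunction becomes a single call to the MachineInstr::mayLoadOrStore() convenience query. As a rough sketch of the helper's shape (simplified; the real declaration in llvm/include/llvm/CodeGen/MachineInstr.h also takes a QueryType argument, defaulting to AnyInBundle, so bundled instructions are handled the same way as the two underlying checks):

// Simplified sketch of the helper adopted by this commit; the
// in-tree version forwards a QueryType to both sub-queries so
// bundle-aware callers get consistent answers.
bool MachineInstr::mayLoadOrStore(QueryType Type) const {
  return mayLoad(Type) || mayStore(Type);
}
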
llvm/include/llvm/CodeGen/TargetInstrInfo.h
@@ -1647,9 +1647,9 @@ public:
   virtual bool
   areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                   const MachineInstr &MIb) const {
-    assert((MIa.mayLoad() || MIa.mayStore()) &&
+    assert(MIa.mayLoadOrStore() &&
            "MIa must load from or modify a memory location");
-    assert((MIb.mayLoad() || MIb.mayStore()) &&
+    assert(MIb.mayLoadOrStore() &&
            "MIb must load from or modify a memory location");
     return false;
   }

llvm/lib/CodeGen/BranchFolding.cpp
@@ -449,7 +449,7 @@ static unsigned EstimateRuntime(MachineBasicBlock::iterator I,
       continue;
     if (I->isCall())
       Time += 10;
-    else if (I->mayLoad() || I->mayStore())
+    else if (I->mayLoadOrStore())
       Time += 2;
     else
       ++Time;

@@ -835,7 +835,7 @@ mergeOperations(MachineBasicBlock::iterator MBBIStartPos,
     assert(MBBICommon->isIdenticalTo(*MBBI) && "Expected matching MIIs!");

     // Merge MMOs from memory operations in the common block.
-    if (MBBICommon->mayLoad() || MBBICommon->mayStore())
+    if (MBBICommon->mayLoadOrStore())
       MBBICommon->cloneMergedMemRefs(*MBB->getParent(), {&*MBBICommon, &*MBBI});
     // Drop undef flags if they aren't present in all merged instructions.
     for (unsigned I = 0, E = MBBICommon->getNumOperands(); I != E; ++I) {

llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -372,7 +372,7 @@ ImplicitNullChecks::isSuitableMemoryOp(const MachineInstr &MI,

   // We want the mem access to be issued at a sane offset from PointerReg,
   // so that if PointerReg is null then the access reliably page faults.
-  if (!((MI.mayLoad() || MI.mayStore()) && !MI.isPredicable() &&
+  if (!(MI.mayLoadOrStore() && !MI.isPredicable() &&
         -PageSize < Offset && Offset < PageSize))
     return SR_Unsuitable;

llvm/lib/CodeGen/StackColoring.cpp
@@ -1004,7 +1004,7 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) {
     // zone are okay, despite the fact that we don't have a good way
     // for validating all of the usages of the calculation.
 #ifndef NDEBUG
-    bool TouchesMemory = I.mayLoad() || I.mayStore();
+    bool TouchesMemory = I.mayLoadOrStore();
     // If we *don't* protect the user from escaped allocas, don't bother
     // validating the instructions.
     if (!I.isDebugInstr() && TouchesMemory && ProtectFromEscapedAllocas) {

llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp
@@ -185,7 +185,7 @@ GCNHazardRecognizer::getHazardType(SUnit *SU, int Stalls) {
   if (SIInstrInfo::isMAI(*MI) && checkMAIHazards(MI) > 0)
     return NoopHazard;

-  if ((MI->mayLoad() || MI->mayStore()) && checkMAILdStHazards(MI) > 0)
+  if (MI->mayLoadOrStore() && checkMAILdStHazards(MI) > 0)
     return NoopHazard;

   if (MI->isInlineAsm() && checkInlineAsmHazards(MI) > 0)

@@ -296,7 +296,7 @@ unsigned GCNHazardRecognizer::PreEmitNoopsCommon(MachineInstr *MI) {
   if (SIInstrInfo::isMAI(*MI))
     return std::max(WaitStates, checkMAIHazards(MI));

-  if (MI->mayLoad() || MI->mayStore())
+  if (MI->mayLoadOrStore())
     return std::max(WaitStates, checkMAILdStHazards(MI));

   return WaitStates;

llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -1211,7 +1211,7 @@ void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
       ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
     }
   } else if (TII->isFLAT(Inst)) {
-    assert(Inst.mayLoad() || Inst.mayStore());
+    assert(Inst.mayLoadOrStore());

     if (TII->usesVM_CNT(Inst)) {
       if (!ST->hasVscnt())

llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -2549,9 +2549,9 @@ bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,

 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                                   const MachineInstr &MIb) const {
-  assert((MIa.mayLoad() || MIa.mayStore()) &&
+  assert(MIa.mayLoadOrStore() &&
          "MIa must load from or modify a memory location");
-  assert((MIb.mayLoad() || MIb.mayStore()) &&
+  assert(MIb.mayLoadOrStore() &&
          "MIb must load from or modify a memory location");

   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())

llvm/lib/Target/ARC/ARCOptAddrMode.cpp
@@ -186,7 +186,7 @@ bool ARCOptAddrMode::noUseOfAddBeforeLoadOrStore(const MachineInstr *Add,
 }

 MachineInstr *ARCOptAddrMode::tryToCombine(MachineInstr &Ldst) {
-  assert((Ldst.mayLoad() || Ldst.mayStore()) && "LD/ST instruction expected");
+  assert(Ldst.mayLoadOrStore() && "LD/ST instruction expected");

   unsigned BasePos, OffsetPos;

llvm/lib/Target/Hexagon/HexagonConstExtenders.cpp
@@ -1639,7 +1639,7 @@ bool HCE::replaceInstrExact(const ExtDesc &ED, Register ExtR) {
     return true;
   }

-  if ((MI.mayLoad() || MI.mayStore()) && !isStoreImmediate(ExtOpc)) {
+  if (MI.mayLoadOrStore() && !isStoreImmediate(ExtOpc)) {
     // For memory instructions, there is an asymmetry in the addressing
     // modes. Addressing modes allowing extenders can be replaced with
     // addressing modes that use registers, but the order of operands

@@ -1794,7 +1794,7 @@ bool HCE::replaceInstrExpr(const ExtDesc &ED, const ExtenderInit &ExtI,
     return true;
   }

-  if (MI.mayLoad() || MI.mayStore()) {
+  if (MI.mayLoadOrStore()) {
     unsigned IdxOpc = getRegOffOpcode(ExtOpc);
     assert(IdxOpc && "Expecting indexed opcode");
     MachineInstrBuilder MIB = BuildMI(MBB, At, dl, HII->get(IdxOpc));

@@ -1844,7 +1844,7 @@ bool HCE::replaceInstr(unsigned Idx, Register ExtR, const ExtenderInit &ExtI) {
   // These two addressing modes must be converted into indexed forms
   // regardless of what the initializer looks like.
   bool IsAbs = false, IsAbsSet = false;
-  if (MI.mayLoad() || MI.mayStore()) {
+  if (MI.mayLoadOrStore()) {
     unsigned AM = HII->getAddrMode(MI);
     IsAbs = AM == HexagonII::Absolute;
     IsAbsSet = AM == HexagonII::AbsoluteSet;

llvm/lib/Target/Hexagon/HexagonEarlyIfConversion.cpp
@@ -682,7 +682,7 @@ bool HexagonEarlyIfConversion::isPredicableStore(const MachineInstr *MI)

 bool HexagonEarlyIfConversion::isSafeToSpeculate(const MachineInstr *MI)
       const {
-  if (MI->mayLoad() || MI->mayStore())
+  if (MI->mayLoadOrStore())
     return false;
   if (MI->isCall() || MI->isBarrier() || MI->isBranch())
     return false;

llvm/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -1041,7 +1041,7 @@ bool HexagonExpandCondsets::predicate(MachineInstr &TfrI, bool Cond,
   bool CanDown = canMoveOver(*DefI, Defs, Uses);
   // The TfrI does not access memory, but DefI could. Check if it's safe
   // to move DefI down to TfrI.
-  if (DefI->mayLoad() || DefI->mayStore())
+  if (DefI->mayLoadOrStore())
     if (!canMoveMemTo(*DefI, TfrI, true))
       CanDown = false;

llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
@@ -2147,7 +2147,7 @@ bool HexagonInstrInfo::isDuplexPair(const MachineInstr &MIa,
 }

 bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr &MI) const {
-  if (MI.mayLoad() || MI.mayStore() || MI.isCompare())
+  if (MI.mayLoadOrStore() || MI.isCompare())
     return true;

   // Multiply

llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
@@ -159,7 +159,7 @@ bool HexagonSplitDoubleRegs::isVolatileInstr(const MachineInstr *MI) const {
 }

 bool HexagonSplitDoubleRegs::isFixedInstr(const MachineInstr *MI) const {
-  if (MI->mayLoad() || MI->mayStore())
+  if (MI->mayLoadOrStore())
     if (MemRefsFixed || isVolatileInstr(MI))
       return true;
   if (MI->isDebugInstr())

llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
@@ -271,7 +271,7 @@ void HexagonStoreWidening::createStoreGroup(MachineInstr *BaseStore,
     if (MI->isCall() || MI->hasUnmodeledSideEffects())
       return;

-    if (MI->mayLoad() || MI->mayStore()) {
+    if (MI->mayLoadOrStore()) {
      if (MI->hasOrderedMemoryRef() || instrAliased(Group, MI))
         return;
       Other.push_back(MI);