2014-05-24 20:50:23 +08:00
|
|
|
//===--- AArch64StorePairSuppress.cpp --- Suppress store pair formation ---===//
|
2014-03-29 18:18:08 +08:00
|
|
|
//
|
2019-01-19 16:50:56 +08:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2014-03-29 18:18:08 +08:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
|
|
|
// This pass identifies floating point stores that should not be combined into
|
|
|
|
// store pairs. Later we may do the same for floating point loads.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
#include "AArch64InstrInfo.h"
|
2014-03-29 18:18:08 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
|
|
|
#include "llvm/CodeGen/MachineTraceMetrics.h"
|
2017-11-08 09:01:31 +08:00
|
|
|
#include "llvm/CodeGen/TargetInstrInfo.h"
|
2014-03-29 18:18:08 +08:00
|
|
|
#include "llvm/CodeGen/TargetSchedule.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
#define DEBUG_TYPE "aarch64-stp-suppress"
|
2014-04-22 10:41:26 +08:00
|
|
|
|
2016-08-01 13:56:57 +08:00
|
|
|
#define STPSUPPRESS_PASS_NAME "AArch64 Store Pair Suppression"
|
|
|
|
|
2014-03-29 18:18:08 +08:00
|
|
|
namespace {
|
2014-05-24 20:50:23 +08:00
|
|
|
class AArch64StorePairSuppress : public MachineFunctionPass {
|
|
|
|
const AArch64InstrInfo *TII;
|
2014-03-29 18:18:08 +08:00
|
|
|
const TargetRegisterInfo *TRI;
|
|
|
|
const MachineRegisterInfo *MRI;
|
|
|
|
TargetSchedModel SchedModel;
|
|
|
|
MachineTraceMetrics *Traces;
|
|
|
|
MachineTraceMetrics::Ensemble *MinInstr;
|
|
|
|
|
|
|
|
public:
|
|
|
|
static char ID;
|
2016-08-01 13:56:57 +08:00
|
|
|
AArch64StorePairSuppress() : MachineFunctionPass(ID) {
|
|
|
|
initializeAArch64StorePairSuppressPass(*PassRegistry::getPassRegistry());
|
2014-03-29 18:18:08 +08:00
|
|
|
}
|
|
|
|
|
2016-10-01 10:56:57 +08:00
|
|
|
StringRef getPassName() const override { return STPSUPPRESS_PASS_NAME; }
|
2016-08-01 13:56:57 +08:00
|
|
|
|
2014-04-03 02:00:59 +08:00
|
|
|
bool runOnMachineFunction(MachineFunction &F) override;
|
2014-03-29 18:18:08 +08:00
|
|
|
|
|
|
|
private:
|
|
|
|
bool shouldAddSTPToBlock(const MachineBasicBlock *BB);
|
|
|
|
|
2014-04-03 02:00:59 +08:00
|
|
|
bool isNarrowFPStore(const MachineInstr &MI);
|
2014-03-29 18:18:08 +08:00
|
|
|
|
2014-08-31 00:48:34 +08:00
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
2014-03-29 18:18:08 +08:00
|
|
|
AU.setPreservesCFG();
|
|
|
|
AU.addRequired<MachineTraceMetrics>();
|
|
|
|
AU.addPreserved<MachineTraceMetrics>();
|
|
|
|
MachineFunctionPass::getAnalysisUsage(AU);
|
|
|
|
}
|
|
|
|
};
|
2014-05-24 20:50:23 +08:00
|
|
|
char AArch64StorePairSuppress::ID = 0;
|
2015-06-23 17:49:53 +08:00
|
|
|
} // anonymous
|
2014-03-29 18:18:08 +08:00
|
|
|
|
2016-08-01 13:56:57 +08:00
|
|
|
// Register the pass with the legacy pass manager under the internal name
// "aarch64-stp-suppress". Not CFG-only, not an analysis pass.
INITIALIZE_PASS(AArch64StorePairSuppress, "aarch64-stp-suppress",
                STPSUPPRESS_PASS_NAME, false, false)
|
|
|
|
|
2014-05-24 20:50:23 +08:00
|
|
|
/// Create a new instance of this pass for insertion into the AArch64 codegen
/// pipeline.
FunctionPass *llvm::createAArch64StorePairSuppressPass() {
  return new AArch64StorePairSuppress();
}
|
|
|
|
|
|
|
|
/// Return true if an STP can be added to this block without increasing the
|
|
|
|
/// critical resource height. STP is good to form in Ld/St limited blocks and
|
|
|
|
/// bad to form in float-point limited blocks. This is true independent of the
|
|
|
|
/// critical path. If the critical path is longer than the resource height, the
|
|
|
|
/// extra vector ops can limit physreg renaming. Otherwise, it could simply
|
|
|
|
/// oversaturate the vector units.
|
2014-05-24 20:50:23 +08:00
|
|
|
bool AArch64StorePairSuppress::shouldAddSTPToBlock(const MachineBasicBlock *BB) {
|
2014-03-29 18:18:08 +08:00
|
|
|
if (!MinInstr)
|
|
|
|
MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);
|
|
|
|
|
|
|
|
MachineTraceMetrics::Trace BBTrace = MinInstr->getTrace(BB);
|
|
|
|
unsigned ResLength = BBTrace.getResourceLength();
|
|
|
|
|
|
|
|
// Get the machine model's scheduling class for STPQi.
|
|
|
|
// Bypass TargetSchedule's SchedClass resolution since we only have an opcode.
|
2014-05-24 20:50:23 +08:00
|
|
|
unsigned SCIdx = TII->get(AArch64::STPDi).getSchedClass();
|
2014-03-29 18:18:08 +08:00
|
|
|
const MCSchedClassDesc *SCDesc =
|
|
|
|
SchedModel.getMCSchedModel()->getSchedClassDesc(SCIdx);
|
|
|
|
|
|
|
|
// If a subtarget does not define resources for STPQi, bail here.
|
|
|
|
if (SCDesc->isValid() && !SCDesc->isVariant()) {
|
2014-08-27 13:25:25 +08:00
|
|
|
unsigned ResLenWithSTP = BBTrace.getResourceLength(None, SCDesc);
|
2014-03-29 18:18:08 +08:00
|
|
|
if (ResLenWithSTP > ResLength) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " Suppress STP in BB: " << BB->getNumber()
|
|
|
|
<< " resources " << ResLength << " -> " << ResLenWithSTP
|
|
|
|
<< "\n");
|
2014-03-29 18:18:08 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Return true if this is a floating-point store smaller than the V reg. On
|
|
|
|
/// cyclone, these require a vector shuffle before storing a pair.
|
|
|
|
/// Ideally we would call getMatchingPairOpcode() and have the machine model
|
|
|
|
/// tell us if it's profitable with no cpu knowledge here.
|
|
|
|
///
|
|
|
|
/// FIXME: We plan to develop a decent Target abstraction for simple loads and
|
2014-05-24 20:50:23 +08:00
|
|
|
/// stores. Until then use a nasty switch similar to AArch64LoadStoreOptimizer.
|
|
|
|
bool AArch64StorePairSuppress::isNarrowFPStore(const MachineInstr &MI) {
|
2014-04-03 02:00:59 +08:00
|
|
|
switch (MI.getOpcode()) {
|
2014-03-29 18:18:08 +08:00
|
|
|
default:
|
|
|
|
return false;
|
2014-05-24 20:50:23 +08:00
|
|
|
case AArch64::STRSui:
|
|
|
|
case AArch64::STRDui:
|
|
|
|
case AArch64::STURSi:
|
|
|
|
case AArch64::STURDi:
|
2014-03-29 18:18:08 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-01-27 15:54:36 +08:00
|
|
|
bool AArch64StorePairSuppress::runOnMachineFunction(MachineFunction &MF) {
|
2017-12-16 06:22:58 +08:00
|
|
|
if (skipFunction(MF.getFunction()))
|
2016-04-26 05:58:52 +08:00
|
|
|
return false;
|
|
|
|
|
2015-01-27 15:54:36 +08:00
|
|
|
const TargetSubtargetInfo &ST = MF.getSubtarget();
|
|
|
|
TII = static_cast<const AArch64InstrInfo *>(ST.getInstrInfo());
|
|
|
|
TRI = ST.getRegisterInfo();
|
|
|
|
MRI = &MF.getRegInfo();
|
2018-04-09 03:56:04 +08:00
|
|
|
SchedModel.init(&ST);
|
2014-03-29 18:18:08 +08:00
|
|
|
Traces = &getAnalysis<MachineTraceMetrics>();
|
2014-04-25 13:30:21 +08:00
|
|
|
MinInstr = nullptr;
|
2014-03-29 18:18:08 +08:00
|
|
|
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "*** " << getPassName() << ": " << MF.getName() << '\n');
|
2014-03-29 18:18:08 +08:00
|
|
|
|
|
|
|
if (!SchedModel.hasInstrSchedModel()) {
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << " Skipping pass: no machine model present.\n");
|
2014-03-29 18:18:08 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check for a sequence of stores to the same base address. We don't need to
|
|
|
|
// precisely determine whether a store pair can be formed. But we do want to
|
|
|
|
// filter out most situations where we can't form store pairs to avoid
|
|
|
|
// computing trace metrics in those cases.
|
2015-01-27 15:54:36 +08:00
|
|
|
for (auto &MBB : MF) {
|
2014-03-29 18:18:08 +08:00
|
|
|
bool SuppressSTP = false;
|
|
|
|
unsigned PrevBaseReg = 0;
|
2014-04-04 07:43:26 +08:00
|
|
|
for (auto &MI : MBB) {
|
2014-04-03 02:00:59 +08:00
|
|
|
if (!isNarrowFPStore(MI))
|
2014-03-29 18:18:08 +08:00
|
|
|
continue;
|
2019-04-19 17:08:38 +08:00
|
|
|
const MachineOperand *BaseOp;
|
2016-03-10 00:00:35 +08:00
|
|
|
int64_t Offset;
|
Add OffsetIsScalable to getMemOperandWithOffset
Summary:
Making `Scale` a `TypeSize` in AArch64InstrInfo::getMemOpInfo,
has the effect that all places where this information is used
(notably, TargetInstrInfo::getMemOperandWithOffset) will need
to consider Scale - and derived, Offset - possibly being scalable.
This patch adds a new operand `bool &OffsetIsScalable` to
TargetInstrInfo::getMemOperandWithOffset and fixes up all
the places where this function is used, to consider the
offset possibly being scalable.
In most cases, this means bailing out because the algorithm does not
(or cannot) support scalable offsets in places where it does some
form of alias checking for example.
Reviewers: rovka, efriedma, kristof.beyls
Reviewed By: efriedma
Subscribers: wuzish, kerbowa, MatzeB, arsenm, nemanjai, jvesely, nhaehnle, hiraditya, kbarton, javed.absar, asb, rbar, johnrusso, simoncook, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, rogfer01, MartinMosbeck, brucehoult, the_o, PkmX, jocewei, jsji, Jim, lenary, s.egerton, pzheng, sameer.abuasal, apazos, luismarques, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72758
2020-02-18 22:32:26 +08:00
|
|
|
bool OffsetIsScalable;
|
|
|
|
if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
|
|
|
|
TRI) &&
|
2018-11-28 20:00:20 +08:00
|
|
|
BaseOp->isReg()) {
|
[aarch64] Apply llvm-prefer-register-over-unsigned from clang-tidy to LLVM
Summary:
This clang-tidy check is looking for unsigned integer variables whose initializer
starts with an implicit cast from llvm::Register and changes the type of the
variable to llvm::Register (dropping the llvm:: where possible).
Manual fixups in:
AArch64InstrInfo.cpp - genFusedMultiply() now takes a Register* instead of unsigned*
AArch64LoadStoreOptimizer.cpp - Ternary operator was ambiguous between Register/MCRegister. Settled on Register
Depends on D65919
Reviewers: aemerson
Subscribers: jholewinski, MatzeB, qcolombet, dschuff, jyknight, dylanmckay, sdardis, nemanjai, jvesely, wdng, nhaehnle, sbc100, jgravelle-google, kristof.beyls, hiraditya, aheejin, kbarton, fedor.sergeev, javed.absar, asb, rbar, johnrusso, simoncook, apazos, sabuasal, niosHD, jrtc27, MaskRay, zzheng, edward-jones, atanasyan, rogfer01, MartinMosbeck, brucehoult, the_o, tpr, PkmX, jocewei, jsji, Petar.Avramovic, asbirlea, Jim, s.egerton, llvm-commits
Tags: #llvm
Differential Revision for full review was: https://reviews.llvm.org/D65962
llvm-svn: 368628
2019-08-13 06:40:53 +08:00
|
|
|
Register BaseReg = BaseOp->getReg();
|
2014-03-29 18:18:08 +08:00
|
|
|
if (PrevBaseReg == BaseReg) {
|
|
|
|
// If this block can take STPs, skip ahead to the next block.
|
2014-04-03 02:00:59 +08:00
|
|
|
if (!SuppressSTP && shouldAddSTPToBlock(MI.getParent()))
|
2014-03-29 18:18:08 +08:00
|
|
|
break;
|
|
|
|
// Otherwise, continue unpairing the stores in this block.
|
2018-05-14 20:53:11 +08:00
|
|
|
LLVM_DEBUG(dbgs() << "Unpairing store " << MI << "\n");
|
2014-03-29 18:18:08 +08:00
|
|
|
SuppressSTP = true;
|
2016-06-30 08:01:54 +08:00
|
|
|
TII->suppressLdStPair(MI);
|
2014-03-29 18:18:08 +08:00
|
|
|
}
|
|
|
|
PrevBaseReg = BaseReg;
|
|
|
|
} else
|
|
|
|
PrevBaseReg = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// This pass just sets some internal MachineMemOperand flags. It can't really
|
|
|
|
// invalidate anything.
|
|
|
|
return false;
|
|
|
|
}
|