//===-- ARMLowOverheadLoops.cpp - CodeGen Low-overhead Loops ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// Finalize v8.1-m low-overhead loops by converting the associated pseudo
/// instructions into machine operations.
/// The expectation is that the loop contains three pseudo instructions:
/// - t2*LoopStart - placed in the preheader or pre-preheader. The do-loop
///   form should be in the preheader, whereas the while form should be in the
///   preheader's only predecessor.
/// - t2LoopDec - placed within the loop body.
/// - t2LoopEnd - the loop latch terminator.
///
/// In addition to this, we also look for the presence of the VCTP instruction,
/// which determines whether we can generate the tail-predicated low-overhead
/// loop form.
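///
/// As a rough, illustrative sketch only (the registers and body below are
/// hypothetical, not taken from any particular test), the tail-predicated
/// form that this pass aims to produce looks something like:
/// \verbatim
///     dlstp.32 lr, r2          ; loop start, r2 holds the element count
///   loop:
///     vldrw.u32 q0, [r0], #16  ; implicitly predicated on the remaining count
///     vadd.i32  q1, q1, q0
///     letp lr, loop            ; decrement the element count and branch back
/// \endverbatim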
///
/// Assumptions and Dependencies:
/// Low-overhead loops are constructed and executed using a setup instruction:
/// DLS, WLS, DLSTP or WLSTP and an instruction that loops back: LE or LETP.
/// WLS(TP) and LE(TP) are branching instructions with a (large) limited range
/// but fixed polarity: WLS can only branch forwards and LE can only branch
/// backwards. These restrictions mean that this pass is dependent upon block
/// layout and block sizes, which is why it's the last pass to run. The same is
/// true for ConstantIslands, but this pass does not increase the size of the
/// basic blocks, nor does it change the CFG. Instructions are mainly removed
/// during the transform and pseudo instructions are replaced by real ones. In
/// some cases, when we have to revert to a 'normal' loop, we have to introduce
/// multiple instructions for a single pseudo (see RevertWhile and
/// RevertLoopEnd). To handle this situation, t2WhileLoopStart and t2LoopEnd
/// are defined to be as large as this maximum sequence of replacement
/// instructions.
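///
/// For illustration only (the register and label names are hypothetical), the
/// non-tail-predicated do-loop form expands to roughly:
/// \verbatim
///     dls lr, r0        ; do-loop start: lr = iteration count
///   loop:
///     ...
///     le lr, loop       ; decrement lr and branch back while non-zero
/// \endverbatim
/// whereas the while form uses "wls lr, r0, end", which additionally branches
/// forwards to the exit when the count is zero.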
///
/// A note on VPR.P0 (the lane mask):
/// VPT, VCMP, VPNOT and VCTP won't overwrite VPR.P0 when they update it in a
/// "VPT Active" context (which includes low-overhead loops and vpt blocks).
/// They will simply "and" the result of their calculation with the current
/// value of VPR.P0. You can think of it like this:
/// \verbatim
/// if VPT active:    ; Between a DLSTP/LETP, or for predicated instrs
///   VPR.P0 &= Value
/// else
///   VPR.P0 = Value
/// \endverbatim
/// When we're inside the low-overhead loop (between DLSTP and LETP), we always
/// fall in the "VPT active" case, so we can consider that all VPR writes by
/// one of those instructions are actually an "and".
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMBasicBlockInfo.h"
#include "ARMSubtarget.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineLoopUtils.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ReachingDefAnalysis.h"
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

#define DEBUG_TYPE "arm-low-overhead-loops"
#define ARM_LOW_OVERHEAD_LOOPS_NAME "ARM Low Overhead Loops pass"

namespace {

  using InstSet = SmallPtrSetImpl<MachineInstr *>;

  class PostOrderLoopTraversal {
    MachineLoop &ML;
    MachineLoopInfo &MLI;
    SmallPtrSet<MachineBasicBlock*, 4> Visited;
    SmallVector<MachineBasicBlock*, 4> Order;

  public:
    PostOrderLoopTraversal(MachineLoop &ML, MachineLoopInfo &MLI)
      : ML(ML), MLI(MLI) { }

    const SmallVectorImpl<MachineBasicBlock*> &getOrder() const {
      return Order;
    }

    // Visit all the blocks within the loop, as well as exit blocks and any
    // blocks properly dominating the header.
    void ProcessLoop() {
      std::function<void(MachineBasicBlock*)> Search = [this, &Search]
        (MachineBasicBlock *MBB) -> void {
        if (Visited.count(MBB))
          return;

        Visited.insert(MBB);
        for (auto *Succ : MBB->successors()) {
          if (!ML.contains(Succ))
            continue;
          Search(Succ);
        }
        Order.push_back(MBB);
      };

      // Insert exit blocks.
      SmallVector<MachineBasicBlock*, 2> ExitBlocks;
      ML.getExitBlocks(ExitBlocks);
      for (auto *MBB : ExitBlocks)
        Order.push_back(MBB);

      // Then add the loop body.
      Search(ML.getHeader());

      // Then try the preheader and its predecessors.
      std::function<void(MachineBasicBlock*)> GetPredecessor =
        [this, &GetPredecessor] (MachineBasicBlock *MBB) -> void {
        Order.push_back(MBB);
        if (MBB->pred_size() == 1)
          GetPredecessor(*MBB->pred_begin());
      };

      if (auto *Preheader = ML.getLoopPreheader())
        GetPredecessor(Preheader);
      else if (auto *Preheader = MLI.findLoopPreheader(&ML, true))
        GetPredecessor(Preheader);
    }
  };

  struct PredicatedMI {
    MachineInstr *MI = nullptr;
    SetVector<MachineInstr*> Predicates;

  public:
    PredicatedMI(MachineInstr *I, SetVector<MachineInstr *> &Preds) : MI(I) {
      assert(I && "Instruction must not be null!");
      Predicates.insert(Preds.begin(), Preds.end());
    }
  };

  // Represent a VPT block, a list of instructions that begins with a VPT/VPST
  // and is followed by a maximum of four more instructions. All instructions
  // within the block are predicated upon the vpr and we allow instructions to
  // define the vpr within the block too.
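  //
  // As an illustrative example only (the registers and condition are
  // hypothetical), a single-instruction VPT block in assembly looks like:
  //   vpt.i32   eq, q0, q1    ; starts the block and writes VPR.P0
  //   vaddt.i32 q2, q2, q3    ; executed only in the lanes where P0 is true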
  class VPTBlock {
    // The predicate then instruction, which is either a VPT, or a VPST
    // instruction.
    std::unique_ptr<PredicatedMI> PredicateThen;
    PredicatedMI *Divergent = nullptr;
    SmallVector<PredicatedMI, 4> Insts;

  public:
    VPTBlock(MachineInstr *MI, SetVector<MachineInstr*> &Preds) {
      PredicateThen = std::make_unique<PredicatedMI>(MI, Preds);
    }

    void addInst(MachineInstr *MI, SetVector<MachineInstr*> &Preds) {
      LLVM_DEBUG(dbgs() << "ARM Loops: Adding predicated MI: " << *MI);
      if (!Divergent && !set_difference(Preds, PredicateThen->Predicates).empty()) {
        Divergent = &Insts.back();
        LLVM_DEBUG(dbgs() << " - has divergent predicate: " << *Divergent->MI);
      }
      Insts.emplace_back(MI, Preds);
      assert(Insts.size() <= 4 && "Too many instructions in VPT block!");
    }

    // Have we found an instruction within the block which defines the vpr? If
    // so, not all the instructions in the block will have the same predicate.
    bool HasNonUniformPredicate() const {
      return Divergent != nullptr;
    }

    // Is the given instruction part of the predicate set controlling the entry
    // to the block?
    bool IsPredicatedOn(MachineInstr *MI) const {
      return PredicateThen->Predicates.count(MI);
    }

    // Returns true if this is a VPT instruction.
    bool isVPT() const { return !isVPST(); }

    // Returns true if this is a VPST instruction.
    bool isVPST() const {
      return PredicateThen->MI->getOpcode() == ARM::MVE_VPST;
    }

    // Is the given instruction the only predicate which controls the entry to
    // the block?
    bool IsOnlyPredicatedOn(MachineInstr *MI) const {
      return IsPredicatedOn(MI) && PredicateThen->Predicates.size() == 1;
    }

    unsigned size() const { return Insts.size(); }
    SmallVectorImpl<PredicatedMI> &getInsts() { return Insts; }
    MachineInstr *getPredicateThen() const { return PredicateThen->MI; }
    PredicatedMI *getDivergent() const { return Divergent; }
  };

  struct Reduction {
    MachineInstr *Init;
    MachineInstr &Copy;
    MachineInstr &Reduce;
    MachineInstr &VPSEL;

    Reduction(MachineInstr *Init, MachineInstr *Mov, MachineInstr *Add,
              MachineInstr *Sel)
      : Init(Init), Copy(*Mov), Reduce(*Add), VPSEL(*Sel) { }
  };

  struct LowOverheadLoop {

    MachineLoop &ML;
    MachineBasicBlock *Preheader = nullptr;
    MachineLoopInfo &MLI;
    ReachingDefAnalysis &RDA;
    const TargetRegisterInfo &TRI;
    const ARMBaseInstrInfo &TII;
    MachineFunction *MF = nullptr;
    MachineInstr *InsertPt = nullptr;
    MachineInstr *Start = nullptr;
    MachineInstr *Dec = nullptr;
    MachineInstr *End = nullptr;
    MachineInstr *VCTP = nullptr;
    MachineOperand TPNumElements;
    SmallPtrSet<MachineInstr*, 4> SecondaryVCTPs;
    VPTBlock *CurrentBlock = nullptr;
    SetVector<MachineInstr*> CurrentPredicate;
    SmallVector<VPTBlock, 4> VPTBlocks;
    SmallPtrSet<MachineInstr*, 4> ToRemove;
    SmallVector<std::unique_ptr<Reduction>, 1> Reductions;
    SmallPtrSet<MachineInstr*, 4> BlockMasksToRecompute;
    bool Revert = false;
    bool CannotTailPredicate = false;

    LowOverheadLoop(MachineLoop &ML, MachineLoopInfo &MLI,
                    ReachingDefAnalysis &RDA, const TargetRegisterInfo &TRI,
                    const ARMBaseInstrInfo &TII)
      : ML(ML), MLI(MLI), RDA(RDA), TRI(TRI), TII(TII),
        TPNumElements(MachineOperand::CreateImm(0)) {
      MF = ML.getHeader()->getParent();
      if (auto *MBB = ML.getLoopPreheader())
        Preheader = MBB;
      else if (auto *MBB = MLI.findLoopPreheader(&ML, true))
        Preheader = MBB;
    }

    // If this is an MVE instruction, check that we know how to use tail
    // predication with it. Record VPT blocks and return whether the
    // instruction is valid for tail predication.
    bool ValidateMVEInst(MachineInstr *MI);

    void AnalyseMVEInst(MachineInstr *MI) {
      CannotTailPredicate = !ValidateMVEInst(MI);
    }

    bool IsTailPredicationLegal() const {
      // For now, let's keep things really simple and only support a single
      // block for tail predication.
      return !Revert && FoundAllComponents() && VCTP &&
             !CannotTailPredicate && ML.getNumBlocks() == 1;
    }

    // Check that the predication in the loop will be equivalent once we
    // perform the conversion. Also ensure that we can provide the number
    // of elements to the loop start instruction.
    bool ValidateTailPredicate(MachineInstr *StartInsertPt);

    // See whether the live-out instructions are a reduction that we can fix up
    // later.
    bool FindValidReduction(InstSet &LiveMIs, InstSet &LiveOutUsers);

    // Check that any values available outside of the loop will be the same
    // after tail predication conversion.
    bool ValidateLiveOuts();

    // Is it safe to define LR with DLS/WLS?
    // LR can be defined if it is the operand to start, because it's the same
    // value, or if it's going to be equivalent to the operand to Start.
    MachineInstr *isSafeToDefineLR();

    // Check the branch targets are within range and we satisfy our
    // restrictions.
    void CheckLegality(ARMBasicBlockUtils *BBUtils);

    bool FoundAllComponents() const {
      return Start && Dec && End;
    }

    SmallVectorImpl<VPTBlock> &getVPTBlocks() { return VPTBlocks; }

    // Return the operand for the loop start instruction. This will be the loop
    // iteration count, or the number of elements if we're tail predicating.
    MachineOperand &getLoopStartOperand() {
      return IsTailPredicationLegal() ? TPNumElements : Start->getOperand(0);
    }

    unsigned getStartOpcode() const {
      bool IsDo = Start->getOpcode() == ARM::t2DoLoopStart;
      if (!IsTailPredicationLegal())
        return IsDo ? ARM::t2DLS : ARM::t2WLS;

      return VCTPOpcodeToLSTP(VCTP->getOpcode(), IsDo);
    }

    void dump() const {
      if (Start) dbgs() << "ARM Loops: Found Loop Start: " << *Start;
      if (Dec) dbgs() << "ARM Loops: Found Loop Dec: " << *Dec;
      if (End) dbgs() << "ARM Loops: Found Loop End: " << *End;
      if (VCTP) dbgs() << "ARM Loops: Found VCTP: " << *VCTP;
      if (!FoundAllComponents())
        dbgs() << "ARM Loops: Not a low-overhead loop.\n";
      else if (!(Start && Dec && End))
        dbgs() << "ARM Loops: Failed to find all loop components.\n";
    }
  };

  class ARMLowOverheadLoops : public MachineFunctionPass {
    MachineFunction *MF = nullptr;
    MachineLoopInfo *MLI = nullptr;
    ReachingDefAnalysis *RDA = nullptr;
    const ARMBaseInstrInfo *TII = nullptr;
    MachineRegisterInfo *MRI = nullptr;
    const TargetRegisterInfo *TRI = nullptr;
    std::unique_ptr<ARMBasicBlockUtils> BBUtils = nullptr;

  public:
    static char ID;

    ARMLowOverheadLoops() : MachineFunctionPass(ID) { }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      AU.addRequired<MachineLoopInfo>();
      AU.addRequired<ReachingDefAnalysis>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &MF) override;

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs).set(
          MachineFunctionProperties::Property::TracksLiveness);
    }

    StringRef getPassName() const override {
      return ARM_LOW_OVERHEAD_LOOPS_NAME;
    }

  private:
    bool ProcessLoop(MachineLoop *ML);

    bool RevertNonLoops();

    void RevertWhile(MachineInstr *MI) const;

    bool RevertLoopDec(MachineInstr *MI) const;

    void RevertLoopEnd(MachineInstr *MI, bool SkipCmp = false) const;

    void ConvertVPTBlocks(LowOverheadLoop &LoLoop);

    void FixupReductions(LowOverheadLoop &LoLoop) const;

    MachineInstr *ExpandLoopStart(LowOverheadLoop &LoLoop);

    void Expand(LowOverheadLoop &LoLoop);

    void IterationCountDCE(LowOverheadLoop &LoLoop);
  };
}

char ARMLowOverheadLoops::ID = 0;

INITIALIZE_PASS(ARMLowOverheadLoops, DEBUG_TYPE, ARM_LOW_OVERHEAD_LOOPS_NAME,
                false, false)

MachineInstr *LowOverheadLoop::isSafeToDefineLR() {
  // We can define LR because LR already contains the same value.
  if (Start->getOperand(0).getReg() == ARM::LR)
    return Start;

  unsigned CountReg = Start->getOperand(0).getReg();
  auto IsMoveLR = [&CountReg](MachineInstr *MI) {
    return MI->getOpcode() == ARM::tMOVr &&
           MI->getOperand(0).getReg() == ARM::LR &&
           MI->getOperand(1).getReg() == CountReg &&
           MI->getOperand(2).getImm() == ARMCC::AL;
  };

  MachineBasicBlock *MBB = Start->getParent();

  // Find an insertion point:
  // - Is there a (mov lr, Count) before Start? If so, and nothing else writes
  //   to Count before Start, we can insert at that mov.
  if (auto *LRDef = RDA.getUniqueReachingMIDef(Start, ARM::LR))
    if (IsMoveLR(LRDef) && RDA.hasSameReachingDef(Start, LRDef, CountReg))
      return LRDef;

  // - Is there a (mov lr, Count) after Start? If so, and nothing else writes
  //   to Count after Start, we can insert at that mov.
  if (auto *LRDef = RDA.getLocalLiveOutMIDef(MBB, ARM::LR))
    if (IsMoveLR(LRDef) && RDA.hasSameReachingDef(Start, LRDef, CountReg))
      return LRDef;

  // We've found no suitable LR def and Start doesn't use LR directly. Can we
  // just define LR anyway?
  return RDA.isSafeToDefRegAt(Start, ARM::LR) ? Start : nullptr;
}

bool LowOverheadLoop::ValidateTailPredicate(MachineInstr *StartInsertPt) {
  assert(VCTP && "VCTP instruction expected but is not set");
  // All predication within the loop should be based on vctp. If the block
  // isn't predicated on entry, check whether the vctp is within the block
  // and that all other instructions are then predicated on it.
  for (auto &Block : VPTBlocks) {
    if (Block.IsPredicatedOn(VCTP))
      continue;
    if (Block.HasNonUniformPredicate() && !isVCTP(Block.getDivergent()->MI)) {
      LLVM_DEBUG(dbgs() << "ARM Loops: Found unsupported diverging predicate: "
                        << *Block.getDivergent()->MI);
      return false;
    }
    SmallVectorImpl<PredicatedMI> &Insts = Block.getInsts();
    for (auto &PredMI : Insts) {
      // Check the instructions in the block and only allow:
      //   - VCTPs
      //   - Instructions predicated on the main VCTP
      //   - Any VCMP
      //      - VCMPs just "and" their result with VPR.P0. Whether they are
      //        located before/after the VCTP is irrelevant - the end result
      //        will be the same in both cases, so there's no point in
      //        requiring them to be located after the VCTP!
      if (PredMI.Predicates.count(VCTP) || isVCTP(PredMI.MI) ||
          VCMPOpcodeToVPT(PredMI.MI->getOpcode()) != 0)
        continue;
      LLVM_DEBUG(dbgs() << "ARM Loops: Can't convert: " << *PredMI.MI
                 << " - which is predicated on:\n";
                 for (auto *MI : PredMI.Predicates)
                   dbgs() << "   - " << *MI);
      return false;
    }
  }

  if (!ValidateLiveOuts())
    return false;

  // For tail predication, we need to provide the number of elements, instead
  // of the iteration count, to the loop start instruction. The number of
  // elements is provided to the vctp instruction, so we need to check that
  // we can use this register at InsertPt.
  TPNumElements = VCTP->getOperand(1);
  Register NumElements = TPNumElements.getReg();

  // If the register is defined within the loop, then we can't perform TP.
  // TODO: Check whether this is just a mov of a register that would be
  // available.
  if (RDA.hasLocalDefBefore(VCTP, NumElements)) {
    LLVM_DEBUG(dbgs() << "ARM Loops: VCTP operand is defined in the loop.\n");
    return false;
  }

  // The element count register may be defined after InsertPt, in which case we
  // need to try to move either InsertPt or the def so that the [w|d]lstp can
  // use the value.
  MachineBasicBlock *InsertBB = StartInsertPt->getParent();

  if (!RDA.isReachingDefLiveOut(StartInsertPt, NumElements)) {
    if (auto *ElemDef = RDA.getLocalLiveOutMIDef(InsertBB, NumElements)) {
      if (RDA.isSafeToMoveForwards(ElemDef, StartInsertPt)) {
        ElemDef->removeFromParent();
        InsertBB->insert(MachineBasicBlock::iterator(StartInsertPt), ElemDef);
        LLVM_DEBUG(dbgs() << "ARM Loops: Moved element count def: "
                   << *ElemDef);
      } else if (RDA.isSafeToMoveBackwards(StartInsertPt, ElemDef)) {
        StartInsertPt->removeFromParent();
        InsertBB->insertAfter(MachineBasicBlock::iterator(ElemDef),
                              StartInsertPt);
        LLVM_DEBUG(dbgs() << "ARM Loops: Moved start past: " << *ElemDef);
      } else {
        // If we fail to move an instruction and the element count is provided
        // by a mov, use the mov operand if it will have the same value at the
        // insertion point.
        MachineOperand Operand = ElemDef->getOperand(1);
        if (isMovRegOpcode(ElemDef->getOpcode()) &&
            RDA.getUniqueReachingMIDef(ElemDef, Operand.getReg()) ==
            RDA.getUniqueReachingMIDef(StartInsertPt, Operand.getReg())) {
          TPNumElements = Operand;
          NumElements = TPNumElements.getReg();
        } else {
          LLVM_DEBUG(dbgs()
                     << "ARM Loops: Unable to move element count to loop "
                     << "start instruction.\n");
          return false;
        }
      }
    }
  }

  // Especially in the case of while loops, InsertBB may not be the
  // preheader, so we need to check that the register isn't redefined
  // before entering the loop.
  auto CannotProvideElements = [this](MachineBasicBlock *MBB,
                                      Register NumElements) {
    // NumElements is redefined in this block.
    if (RDA.hasLocalDefBefore(&MBB->back(), NumElements))
      return true;

    // Don't continue searching up through multiple predecessors.
    if (MBB->pred_size() > 1)
      return true;

    return false;
  };

  // First, find the block that looks like the preheader.
  MachineBasicBlock *MBB = Preheader;
  if (!MBB) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Didn't find preheader.\n");
    return false;
  }

  // Then search backwards for a def, until we get to InsertBB.
  while (MBB != InsertBB) {
    if (CannotProvideElements(MBB, NumElements)) {
      LLVM_DEBUG(dbgs() << "ARM Loops: Unable to provide element count.\n");
      return false;
    }
    MBB = *MBB->pred_begin();
  }

  // Check that the value change of the element count is what we expect and
  // that the predication will be equivalent. For this we need:
  // NumElements = NumElements - VectorWidth. The sub will be a sub immediate
  // and we can also allow register copies within the chain too.
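  // For instance (an illustrative example only, register choice hypothetical),
  // with a vctp.32 the expected chain would contain something like
  // "subs r2, r2, #4", i.e. an immediate decrement by the vector width of 4.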
  auto IsValidSub = [](MachineInstr *MI, int ExpectedVecWidth) {
    return -getAddSubImmediate(*MI) == ExpectedVecWidth;
  };

  MBB = VCTP->getParent();
  if (auto *Def = RDA.getUniqueReachingMIDef(&MBB->back(), NumElements)) {
    SmallPtrSet<MachineInstr*, 2> ElementChain;
    SmallPtrSet<MachineInstr*, 2> Ignore = { VCTP };
    unsigned ExpectedVectorWidth = getTailPredVectorWidth(VCTP->getOpcode());

    Ignore.insert(SecondaryVCTPs.begin(), SecondaryVCTPs.end());

    if (RDA.isSafeToRemove(Def, ElementChain, Ignore)) {
      bool FoundSub = false;

      for (auto *MI : ElementChain) {
        if (isMovRegOpcode(MI->getOpcode()))
          continue;

        if (isSubImmOpcode(MI->getOpcode())) {
          if (FoundSub || !IsValidSub(MI, ExpectedVectorWidth))
            return false;
          FoundSub = true;
        } else
          return false;
      }

      LLVM_DEBUG(dbgs() << "ARM Loops: Will remove element count chain:\n";
                 for (auto *MI : ElementChain)
                   dbgs() << " - " << *MI);
      ToRemove.insert(ElementChain.begin(), ElementChain.end());
    }
  }
  return true;
}

static bool isVectorPredicated(MachineInstr *MI) {
  int PIdx = llvm::findFirstVPTPredOperandIdx(*MI);
  return PIdx != -1 && MI->getOperand(PIdx + 1).getReg() == ARM::VPR;
}

static bool isRegInClass(const MachineOperand &MO,
                         const TargetRegisterClass *Class) {
  return MO.isReg() && MO.getReg() && Class->contains(MO.getReg());
}

// MVE 'narrowing' operations operate on half a lane, reading from half and
// writing to half, which are referred to as the top and bottom half. The
// other half retains its previous value.
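// For example (illustrative, not an exhaustive list), VQMOVNB writes the
// narrowed result into the bottom half of each destination element while the
// top half keeps whatever was there before, and VQMOVNT does the opposite.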
static bool retainsPreviousHalfElement(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();
  uint64_t Flags = MCID.TSFlags;
  return (Flags & ARMII::RetainsPreviousHalfElement) != 0;
}

// Some MVE instructions read from the top/bottom halves of their operand(s)
// and generate a vector result with result elements that are double the
// width of the input.
static bool producesDoubleWidthResult(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();
  uint64_t Flags = MCID.TSFlags;
  return (Flags & ARMII::DoubleWidthResult) != 0;
}

static bool isHorizontalReduction(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();
  uint64_t Flags = MCID.TSFlags;
  return (Flags & ARMII::HorizontalReduction) != 0;
}

// Can this instruction generate a non-zero result when given only zeroed
// operands? This allows us to know that, given operands with false bytes
// zeroed by masked loads, the result will also contain zeros in those bytes.
static bool canGenerateNonZeros(const MachineInstr &MI) {

  // Check for instructions which can write into a larger element size,
  // possibly writing into a previous zero'd lane.
  if (producesDoubleWidthResult(MI))
    return true;

  switch (MI.getOpcode()) {
  default:
    break;
  // FIXME: VNEG FP and -0? I think we'll need to handle this once we allow
  // fp16 -> fp32 vector conversions.
  // Instructions that perform a NOT will generate 1s from 0s.
  case ARM::MVE_VMVN:
  case ARM::MVE_VORN:
  // Count leading zeros will do just that!
  case ARM::MVE_VCLZs8:
  case ARM::MVE_VCLZs16:
  case ARM::MVE_VCLZs32:
    return true;
  }
  return false;
}

// Look at its register uses to see if it can only receive zeros into its
// false lanes, which would then produce zeros. Also check that the output
// register is also defined by a FalseLanesZero instruction, so that if
// tail-predication happens, the lanes that aren't updated will still be
// zeros.
static bool producesFalseLanesZero(MachineInstr &MI,
                                   const TargetRegisterClass *QPRs,
                                   const ReachingDefAnalysis &RDA,
                                   InstSet &FalseLanesZero) {
  if (canGenerateNonZeros(MI))
    return false;

  bool AllowScalars = isHorizontalReduction(MI);
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.getReg())
      continue;
    if (!isRegInClass(MO, QPRs) && AllowScalars)
      continue;
    if (auto *OpDef = RDA.getMIOperand(&MI, MO))
      if (FalseLanesZero.count(OpDef))
        continue;
    return false;
  }
  LLVM_DEBUG(dbgs() << "ARM Loops: Always False Zeros: " << MI);
  return true;
}

bool
LowOverheadLoop::FindValidReduction(InstSet &LiveMIs, InstSet &LiveOutUsers) {
  // Also check for reductions where the operation needs to be merging values
  // from the last and previous loop iterations. This means an instruction
  // producing a value and a vmov storing the value calculated in the previous
  // iteration. So we can have two live-out regs, one produced by a vmov and
  // both being consumed by a vpsel.
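  // As an illustrative sketch only (register assignments are hypothetical),
  // the pattern being matched below looks roughly like:
  //   q1 = MVE_VORR q0, q0       ; the vmov copy of the previous value
  //   q0 = MVE_VADDi32 q0, q2    ; the reduction, overwriting an operand
  //   ...
  //   q0 = MVE_VPSEL q0, q1      ; merges the final iteration's valid lanes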
  LLVM_DEBUG(dbgs() << "ARM Loops: Looking for reduction live-outs:\n";
             for (auto *MI : LiveMIs)
               dbgs() << " - " << *MI);

  if (!Preheader)
    return false;

  // Expect a vmov, a vadd and a single vpsel user.
  // TODO: This means we can't currently support multiple reductions in the
  // loop.
  if (LiveMIs.size() != 2 || LiveOutUsers.size() != 1)
    return false;

  MachineInstr *VPSEL = *LiveOutUsers.begin();
  if (VPSEL->getOpcode() != ARM::MVE_VPSEL)
    return false;

  unsigned VPRIdx = llvm::findFirstVPTPredOperandIdx(*VPSEL) + 1;
  MachineInstr *Pred = RDA.getMIOperand(VPSEL, VPRIdx);
  if (!Pred || Pred != VCTP) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Not using equivalent predicate.\n");
    return false;
  }

  MachineInstr *Reduce = RDA.getMIOperand(VPSEL, 1);
  if (!Reduce)
    return false;

  assert(LiveMIs.count(Reduce) && "Expected MI to be live-out");

  // TODO: Support more operations than VADD.
  switch (VCTP->getOpcode()) {
  default:
    return false;
  case ARM::MVE_VCTP8:
    if (Reduce->getOpcode() != ARM::MVE_VADDi8)
      return false;
    break;
  case ARM::MVE_VCTP16:
    if (Reduce->getOpcode() != ARM::MVE_VADDi16)
      return false;
    break;
  case ARM::MVE_VCTP32:
    if (Reduce->getOpcode() != ARM::MVE_VADDi32)
      return false;
    break;
  }

  // Test that the reduce op is overwriting one of its operands.
  if (Reduce->getOperand(0).getReg() != Reduce->getOperand(1).getReg() &&
      Reduce->getOperand(0).getReg() != Reduce->getOperand(2).getReg()) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Reducing op isn't overwriting itself.\n");
    return false;
  }

  // Check that the VORR is actually a VMOV.
  MachineInstr *Copy = RDA.getMIOperand(VPSEL, 2);
  if (!Copy || Copy->getOpcode() != ARM::MVE_VORR ||
      !Copy->getOperand(1).isReg() || !Copy->getOperand(2).isReg() ||
      Copy->getOperand(1).getReg() != Copy->getOperand(2).getReg())
    return false;

  assert(LiveMIs.count(Copy) && "Expected MI to be live-out");

  // Check that the vadd and vmov are only used by each other and the vpsel.
  SmallPtrSet<MachineInstr*, 2> CopyUsers;
  RDA.getGlobalUses(Copy, Copy->getOperand(0).getReg(), CopyUsers);
  if (CopyUsers.size() > 2 || !CopyUsers.count(Reduce)) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Copy users unsupported.\n");
    return false;
  }

  SmallPtrSet<MachineInstr*, 2> ReduceUsers;
  RDA.getGlobalUses(Reduce, Reduce->getOperand(0).getReg(), ReduceUsers);
  if (ReduceUsers.size() > 2 || !ReduceUsers.count(Copy)) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Reduce users unsupported.\n");
    return false;
  }

  // Then find whether there's an instruction initialising the register that
  // is storing the reduction.
  SmallPtrSet<MachineInstr*, 2> Incoming;
  RDA.getLiveOuts(Preheader, Copy->getOperand(1).getReg(), Incoming);
  if (Incoming.size() > 1)
    return false;

  MachineInstr *Init = Incoming.empty() ? nullptr : *Incoming.begin();
  LLVM_DEBUG(dbgs() << "ARM Loops: Found a reduction:\n"
             << " - " << *Copy
             << " - " << *Reduce
             << " - " << *VPSEL);
  Reductions.push_back(std::make_unique<Reduction>(Init, Copy, Reduce, VPSEL));
  return true;
}

bool LowOverheadLoop::ValidateLiveOuts() {
  // We want to find out if the tail-predicated version of this loop will
  // produce the same values as the loop in its original form. For this to
  // be true, the newly inserted implicit predication must not change the
  // (observable) results.
  // We're doing this because many instructions in the loop will not be
  // predicated and so the conversion from VPT predication to tail-predication
  // can result in different values being produced, because the
  // tail-predication prevents many instructions from updating their falsely
  // predicated lanes. This analysis assumes that all the instructions perform
  // lane-wise operations and don't perform any exchanges.
  // A masked load, whether through VPT or tail predication, will write zeros
  // to any of the falsely predicated bytes. So, from the loads, we know that
  // the false lanes are zeroed and here we're trying to track that those false
  // lanes remain zero, or where they change, the differences are masked away
  // by their user(s).
  // All MVE stores have to be predicated, so we know that any predicated load
  // operands, or stored results, are equivalent already. Other explicitly
  // predicated instructions will perform the same operation in the original
  // loop and the tail-predicated form too. Because of this, we can insert
  // loads, stores and other predicated instructions into our Predicated
  // set and build from there.
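  //
  // As a purely illustrative example: a predicated VLDRW seeds the
  // FalseLanesZero set because its falsely predicated lanes are written with
  // zeros; an unpredicated VADD whose operands are all defined by instructions
  // in that set also has zeroed false lanes, so it can be added to the set
  // too.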
  const TargetRegisterClass *QPRs = TRI.getRegClass(ARM::MQPRRegClassID);
  SetVector<MachineInstr *> FalseLanesUnknown;
  SmallPtrSet<MachineInstr *, 4> FalseLanesZero;
  SmallPtrSet<MachineInstr *, 4> Predicated;
  MachineBasicBlock *Header = ML.getHeader();

  for (auto &MI : *Header) {
    const MCInstrDesc &MCID = MI.getDesc();
    uint64_t Flags = MCID.TSFlags;
    if ((Flags & ARMII::DomainMask) != ARMII::DomainMVE)
      continue;

    if (isVCTP(&MI) || isVPTOpcode(MI.getOpcode()))
      continue;

    // Predicated loads will write zeros to the falsely predicated bytes of the
    // destination register.
    if (isVectorPredicated(&MI)) {
      if (MI.mayLoad())
        FalseLanesZero.insert(&MI);
      Predicated.insert(&MI);
      continue;
    }

    if (MI.getNumDefs() == 0)
      continue;

    if (!producesFalseLanesZero(MI, QPRs, RDA, FalseLanesZero)) {
      // We require retaining and horizontal operations to operate upon zero'd
      // false lanes to ensure the conversion doesn't change the output.
      if (retainsPreviousHalfElement(MI) || isHorizontalReduction(MI))
        return false;
      // Otherwise we need to evaluate this instruction later to see whether
      // unknown false lanes will get masked away by their user(s).
      FalseLanesUnknown.insert(&MI);
    } else if (!isHorizontalReduction(MI))
      FalseLanesZero.insert(&MI);
  }

  auto HasPredicatedUsers = [this](MachineInstr *MI, const MachineOperand &MO,
                                   SmallPtrSetImpl<MachineInstr *> &Predicated) {
    SmallPtrSet<MachineInstr *, 2> Uses;
    RDA.getGlobalUses(MI, MO.getReg(), Uses);
    for (auto *Use : Uses) {
      if (Use != MI && !Predicated.count(Use))
        return false;
    }
    return true;
  };

  // Visit the unknowns in reverse so that we can start at the values being
  // stored and then we can work towards the leaves, hopefully adding more
  // instructions to Predicated. Successfully terminating the loop means that
  // all the unknown values have been found to be masked by predicated user(s).
  // For any unpredicated values, we store them in NonPredicated so that we
  // can later check whether these form a reduction.
  SmallPtrSet<MachineInstr*, 2> NonPredicated;
  for (auto *MI : reverse(FalseLanesUnknown)) {
    for (auto &MO : MI->operands()) {
      if (!isRegInClass(MO, QPRs) || !MO.isDef())
        continue;
      if (!HasPredicatedUsers(MI, MO, Predicated)) {
        LLVM_DEBUG(dbgs() << "ARM Loops: Found an unknown def of : "
                          << TRI.getRegAsmName(MO.getReg()) << " at " << *MI);
        NonPredicated.insert(MI);
        continue;
      }
    }
    // Any unknown false lanes have been masked away by the user(s).
    Predicated.insert(MI);
  }

  SmallPtrSet<MachineInstr *, 2> LiveOutMIs;
  SmallPtrSet<MachineInstr*, 2> LiveOutUsers;
  SmallVector<MachineBasicBlock *, 2> ExitBlocks;
  ML.getExitBlocks(ExitBlocks);
  assert(ML.getNumBlocks() == 1 && "Expected single block loop!");
  assert(ExitBlocks.size() == 1 && "Expected a single exit block");
  MachineBasicBlock *ExitBB = ExitBlocks.front();
  for (const MachineBasicBlock::RegisterMaskPair &RegMask : ExitBB->liveins()) {
    // Check Q-regs that are live in the exit blocks. We don't collect scalars
    // because they won't be affected by lane predication.
    if (QPRs->contains(RegMask.PhysReg)) {
      if (auto *MI = RDA.getLocalLiveOutMIDef(Header, RegMask.PhysReg))
        LiveOutMIs.insert(MI);
      RDA.getLiveInUses(ExitBB, RegMask.PhysReg, LiveOutUsers);
    }
  }

  // If we have any non-predicated live-outs, they need to be part of a
  // reduction that we can fixup later. The reduction takes the form of an
  // operation that uses its previous value through a vmov, with a vpsel in
  // the exit block to select the final bytes from the n and n-1 iterations.
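  // Purely as an illustrative sketch (register choices and opcode spellings
  // are approximate, not taken from a specific test), such a reduction might
  // appear in the loop body as:
  //   $q1 = MVE_VORR $q0, $q0, ...      ; copy of the previous accumulator
  //   $q0 = MVE_VADDi32 $q2, $q3, ...   ; the reducing operation
  // with the exit block selecting between the last two iterations:
  //   $q0 = MVE_VPSEL $q0, $q1, ...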
  if (!NonPredicated.empty() &&
      !FindValidReduction(NonPredicated, LiveOutUsers))
    return false;

  // We've already validated that any VPT predication within the loop will be
  // equivalent when we perform the predication transformation; so we know that
  // any VPT predicated instruction is predicated upon VCTP. Any live-out
  // instruction needs to be predicated, so check this here. The instructions
  // in NonPredicated have already been found to form a reduction whose
  // legality we can ensure.
  for (auto *MI : LiveOutMIs)
    if (!isVectorPredicated(MI) && !NonPredicated.count(MI))
      return false;

  return true;
}

void LowOverheadLoop::CheckLegality(ARMBasicBlockUtils *BBUtils) {
  if (Revert)
    return;

  if (!End->getOperand(1).isMBB())
    report_fatal_error("Expected LoopEnd to target basic block");

  // TODO: Maybe there are cases where the target doesn't have to be the
  // header, but for now be safe and revert.
  if (End->getOperand(1).getMBB() != ML.getHeader()) {
    LLVM_DEBUG(dbgs() << "ARM Loops: LoopEnd is not targeting header.\n");
    Revert = true;
    return;
  }

  // The WLS and LE instructions have 12 bits for the label offset. WLS
  // requires a positive offset, while LE uses negative.
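  // Rough arithmetic behind the 4094 limit used below (stated here as an
  // assumption for clarity): with a 12-bit, halfword-scaled offset whose
  // bottom bit is implicitly zero, the largest encodable displacement is
  // 2^12 - 2 = 4094 bytes, hence the isBBInRange(..., 4094) checks.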
  if (BBUtils->getOffsetOf(End) < BBUtils->getOffsetOf(ML.getHeader()) ||
      !BBUtils->isBBInRange(End, ML.getHeader(), 4094)) {
    LLVM_DEBUG(dbgs() << "ARM Loops: LE offset is out-of-range\n");
    Revert = true;
    return;
  }

  if (Start->getOpcode() == ARM::t2WhileLoopStart &&
      (BBUtils->getOffsetOf(Start) >
         BBUtils->getOffsetOf(Start->getOperand(1).getMBB()) ||
       !BBUtils->isBBInRange(Start, Start->getOperand(1).getMBB(), 4094))) {
    LLVM_DEBUG(dbgs() << "ARM Loops: WLS offset is out-of-range!\n");
    Revert = true;
    return;
  }

  InsertPt = Revert ? nullptr : isSafeToDefineLR();
  if (!InsertPt) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Unable to find safe insertion point.\n");
    Revert = true;
    return;
  } else
    LLVM_DEBUG(dbgs() << "ARM Loops: Start insertion point: " << *InsertPt);

  if (!IsTailPredicationLegal()) {
    LLVM_DEBUG(if (!VCTP)
                 dbgs() << "ARM Loops: Didn't find a VCTP instruction.\n";
               dbgs() << "ARM Loops: Tail-predication is not valid.\n");
    return;
  }

  assert(ML.getBlocks().size() == 1 &&
         "Shouldn't be processing a loop with more than one block");
  CannotTailPredicate = !ValidateTailPredicate(InsertPt);
  LLVM_DEBUG(if (CannotTailPredicate)
               dbgs() << "ARM Loops: Couldn't validate tail predicate.\n");
}

bool LowOverheadLoop::ValidateMVEInst(MachineInstr* MI) {
  if (CannotTailPredicate)
    return false;

  if (isVCTP(MI)) {
    // If we find another VCTP, check whether it uses the same value as the
    // main VCTP. If it does, store it in the SecondaryVCTPs set, else refuse
    // it.
    if (VCTP) {
      if (!VCTP->getOperand(1).isIdenticalTo(MI->getOperand(1)) ||
          !RDA.hasSameReachingDef(VCTP, MI, MI->getOperand(1).getReg())) {
        LLVM_DEBUG(dbgs() << "ARM Loops: Found VCTP with a different reaching "
                             "definition from the main VCTP");
        return false;
      }
      LLVM_DEBUG(dbgs() << "ARM Loops: Found secondary VCTP: " << *MI);
      SecondaryVCTPs.insert(MI);
    } else {
      LLVM_DEBUG(dbgs() << "ARM Loops: Found 'main' VCTP: " << *MI);
      VCTP = MI;
    }
  } else if (isVPTOpcode(MI->getOpcode())) {
    if (MI->getOpcode() != ARM::MVE_VPST) {
      assert(MI->findRegisterDefOperandIdx(ARM::VPR) != -1 &&
             "VPT does not implicitly define VPR?!");
      CurrentPredicate.insert(MI);
    }
    VPTBlocks.emplace_back(MI, CurrentPredicate);
    CurrentBlock = &VPTBlocks.back();
    return true;
  } else if (MI->getOpcode() == ARM::MVE_VPSEL ||
             MI->getOpcode() == ARM::MVE_VPNOT) {
    // TODO: Allow VPSEL and VPNOT. We currently cannot because:
    // 1) They use VPR as a predicate operand, but do not have to be the first
    //    instruction of a VPT block, which means we could assert while
    //    building up the VPT block because we won't find another VPT or VPST
    //    to begin a new one.
    // 2) VPSEL still requires a VPR operand even after tail predicating,
    //    which means we can't remove it unless there is another
    //    instruction, such as vcmp, that can provide the VPR def.
    return false;
  }

  bool IsUse = false;
  bool IsDef = false;
  const MCInstrDesc &MCID = MI->getDesc();
  for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.getReg() != ARM::VPR)
      continue;

    if (MO.isDef()) {
      CurrentPredicate.insert(MI);
      IsDef = true;
    } else if (ARM::isVpred(MCID.OpInfo[i].OperandType)) {
      CurrentBlock->addInst(MI, CurrentPredicate);
      IsUse = true;
    } else {
      LLVM_DEBUG(dbgs() << "ARM Loops: Found instruction using vpr: " << *MI);
      return false;
    }
  }

  // If we find a vpr def that is not already predicated on the vctp, we've
  // got disjoint predicates that may not be equivalent when we do the
  // conversion.
  if (IsDef && !IsUse && VCTP && !isVCTP(MI)) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Found disjoint vpr def: " << *MI);
    return false;
  }

  uint64_t Flags = MCID.TSFlags;
  if ((Flags & ARMII::DomainMask) != ARMII::DomainMVE)
    return true;

  // If we find an instruction that has been marked as not valid for tail
  // predication, only allow the instruction if it's contained within a valid
  // VPT block.
  if ((Flags & ARMII::ValidForTailPredication) == 0 && !IsUse) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Can't tail predicate: " << *MI);
    return false;
  }

  // If the instruction is already explicitly predicated, then the conversion
  // will be fine, but ensure that all store operations are predicated.
  return !IsUse && MI->mayStore() ? false : true;
}

bool ARMLowOverheadLoops::runOnMachineFunction(MachineFunction &mf) {
  const ARMSubtarget &ST = static_cast<const ARMSubtarget&>(mf.getSubtarget());
  if (!ST.hasLOB())
    return false;

  MF = &mf;
  LLVM_DEBUG(dbgs() << "ARM Loops on " << MF->getName() << " ------------- \n");

  MLI = &getAnalysis<MachineLoopInfo>();
  RDA = &getAnalysis<ReachingDefAnalysis>();
  MF->getProperties().set(MachineFunctionProperties::Property::TracksLiveness);
  MRI = &MF->getRegInfo();
  TII = static_cast<const ARMBaseInstrInfo*>(ST.getInstrInfo());
  TRI = ST.getRegisterInfo();
  BBUtils = std::unique_ptr<ARMBasicBlockUtils>(new ARMBasicBlockUtils(*MF));
  BBUtils->computeAllBlockSizes();
  BBUtils->adjustBBOffsetsAfter(&MF->front());

  bool Changed = false;
  for (auto ML : *MLI) {
    if (!ML->getParentLoop())
      Changed |= ProcessLoop(ML);
  }
  Changed |= RevertNonLoops();
  return Changed;
}

bool ARMLowOverheadLoops::ProcessLoop(MachineLoop *ML) {

  bool Changed = false;

  // Process inner loops first.
  for (auto I = ML->begin(), E = ML->end(); I != E; ++I)
    Changed |= ProcessLoop(*I);

  LLVM_DEBUG(dbgs() << "ARM Loops: Processing loop containing:\n";
             if (auto *Preheader = ML->getLoopPreheader())
               dbgs() << " - " << Preheader->getName() << "\n";
             else if (auto *Preheader = MLI->findLoopPreheader(ML))
               dbgs() << " - " << Preheader->getName() << "\n";
             else if (auto *Preheader = MLI->findLoopPreheader(ML, true))
               dbgs() << " - " << Preheader->getName() << "\n";
             for (auto *MBB : ML->getBlocks())
               dbgs() << " - " << MBB->getName() << "\n";
            );

  // Search the given block for a loop start instruction. If one isn't found,
  // and there's only one predecessor block, search that one too.
  std::function<MachineInstr*(MachineBasicBlock*)> SearchForStart =
    [&SearchForStart](MachineBasicBlock *MBB) -> MachineInstr* {
      for (auto &MI : *MBB) {
        if (isLoopStart(MI))
          return &MI;
      }
      if (MBB->pred_size() == 1)
        return SearchForStart(*MBB->pred_begin());
      return nullptr;
    };

  LowOverheadLoop LoLoop(*ML, *MLI, *RDA, *TRI, *TII);
  // Search the preheader for the start intrinsic.
  // FIXME: I don't see why we shouldn't be supporting multiple predecessors
  // with potentially multiple set.loop.iterations, so we need to enable this.
  if (LoLoop.Preheader)
    LoLoop.Start = SearchForStart(LoLoop.Preheader);
  else
    return false;

  // Find the low-overhead loop components and decide whether or not to fall
  // back to a normal loop. Also look for a vctp instruction and decide
  // whether we can convert that predicate using tail predication.
  for (auto *MBB : reverse(ML->getBlocks())) {
    for (auto &MI : *MBB) {
      if (MI.isDebugValue())
        continue;
      else if (MI.getOpcode() == ARM::t2LoopDec)
        LoLoop.Dec = &MI;
      else if (MI.getOpcode() == ARM::t2LoopEnd)
        LoLoop.End = &MI;
      else if (isLoopStart(MI))
        LoLoop.Start = &MI;
      else if (MI.getDesc().isCall()) {
        // TODO: Though the call will require LE to execute again, does this
        // mean we should revert? Always executing LE hopefully should be
        // faster than performing a sub,cmp,br or even subs,br.
        LoLoop.Revert = true;
        LLVM_DEBUG(dbgs() << "ARM Loops: Found call.\n");
      } else {
        // Record VPR defs and build up their corresponding vpt blocks.
        // Check we know how to tail predicate any mve instructions.
        LoLoop.AnalyseMVEInst(&MI);
      }
    }
  }

  LLVM_DEBUG(LoLoop.dump());
  if (!LoLoop.FoundAllComponents()) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Didn't find loop start, update, end\n");
    return false;
  }

  // Check that the only instruction using LoopDec is LoopEnd.
  // TODO: Check for copy chains that really have no effect.
  SmallPtrSet<MachineInstr*, 2> Uses;
  RDA->getReachingLocalUses(LoLoop.Dec, ARM::LR, Uses);
  if (Uses.size() > 1 || !Uses.count(LoLoop.End)) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Unable to remove LoopDec.\n");
    LoLoop.Revert = true;
  }
  LoLoop.CheckLegality(BBUtils.get());
  Expand(LoLoop);
  return true;
}

// WhileLoopStart holds the exit block, so produce a cmp lr, 0 and then a
// beq that branches to the exit branch.
// TODO: We could also try to generate a cbz if the value in LR is also in
// another low register.
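// As an illustrative sketch (register names are the typical case, not a
// guarantee), the reverted form of a t2WhileLoopStart is expected to look
// roughly like:
//   cmp   lr, #0
//   beq   <exit block>
// with t2Bcc used instead of tBcc when the target is out of tBcc's range.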
void ARMLowOverheadLoops::RevertWhile(MachineInstr *MI) const {
  LLVM_DEBUG(dbgs() << "ARM Loops: Reverting to cmp: " << *MI);
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
                                    TII->get(ARM::t2CMPri));
  MIB.add(MI->getOperand(0));
  MIB.addImm(0);
  MIB.addImm(ARMCC::AL);
  MIB.addReg(ARM::NoRegister);

  MachineBasicBlock *DestBB = MI->getOperand(1).getMBB();
  unsigned BrOpc = BBUtils->isBBInRange(MI, DestBB, 254) ?
    ARM::tBcc : ARM::t2Bcc;

  MIB = BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(BrOpc));
  MIB.add(MI->getOperand(1)); // branch target
  MIB.addImm(ARMCC::EQ);      // condition code
  MIB.addReg(ARM::CPSR);
  MI->eraseFromParent();
}

bool ARMLowOverheadLoops::RevertLoopDec(MachineInstr *MI) const {
  LLVM_DEBUG(dbgs() << "ARM Loops: Reverting to sub: " << *MI);
  MachineBasicBlock *MBB = MI->getParent();
  SmallPtrSet<MachineInstr*, 1> Ignore;
  for (auto I = MachineBasicBlock::iterator(MI), E = MBB->end(); I != E; ++I) {
    if (I->getOpcode() == ARM::t2LoopEnd) {
      Ignore.insert(&*I);
      break;
    }
  }

  // If nothing defines CPSR between LoopDec and LoopEnd, use a t2SUBS.
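  // Illustrative outcome (the register is typically, but not necessarily,
  // LR): when CPSR is free we emit a flag-setting decrement so that
  // RevertLoopEnd can skip its compare,
  //   subs lr, lr, #1
  // otherwise a plain sub is used and the cmp is inserted later:
  //   sub  lr, lr, #1
  //   ...
  //   cmp  lr, #0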
  bool SetFlags = RDA->isSafeToDefRegAt(MI, ARM::CPSR, Ignore);

  MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
                                    TII->get(ARM::t2SUBri));
  MIB.addDef(ARM::LR);
  MIB.add(MI->getOperand(1));
  MIB.add(MI->getOperand(2));
  MIB.addImm(ARMCC::AL);
  MIB.addReg(0);

  if (SetFlags) {
    MIB.addReg(ARM::CPSR);
    MIB->getOperand(5).setIsDef(true);
  } else
    MIB.addReg(0);

  MI->eraseFromParent();
  return SetFlags;
}

// Generate a subs, or sub and cmp, and a branch instead of an LE.
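// As a rough sketch, an LE reverts to:
//   cmp lr, #0      ; omitted when SkipCmp, i.e. a subs has already set flags
//   bne <loop header>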
void ARMLowOverheadLoops::RevertLoopEnd(MachineInstr *MI, bool SkipCmp) const {
  LLVM_DEBUG(dbgs() << "ARM Loops: Reverting to cmp, br: " << *MI);

  MachineBasicBlock *MBB = MI->getParent();
  // Create cmp
  if (!SkipCmp) {
    MachineInstrBuilder MIB = BuildMI(*MBB, MI, MI->getDebugLoc(),
                                      TII->get(ARM::t2CMPri));
    MIB.addReg(ARM::LR);
    MIB.addImm(0);
    MIB.addImm(ARMCC::AL);
    MIB.addReg(ARM::NoRegister);
  }

  MachineBasicBlock *DestBB = MI->getOperand(1).getMBB();
  unsigned BrOpc = BBUtils->isBBInRange(MI, DestBB, 254) ?
    ARM::tBcc : ARM::t2Bcc;

  // Create bne
  MachineInstrBuilder MIB =
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(BrOpc));
  MIB.add(MI->getOperand(1)); // branch target
  MIB.addImm(ARMCC::NE);      // condition code
  MIB.addReg(ARM::CPSR);
  MI->eraseFromParent();
}

// Perform dead code elimination on the loop iteration count setup expression.
// If we are tail-predicating, the number of elements to be processed is the
// operand of the VCTP instruction in the vector body, see getCount(), which is
// register $r3 in this example:
//
//   $lr = big-itercount-expression
//   ..
//   t2DoLoopStart renamable $lr
//   vector.body:
//     ..
//     $vpr = MVE_VCTP32 renamable $r3
//     renamable $lr = t2LoopDec killed renamable $lr, 1
//     t2LoopEnd renamable $lr, %vector.body
//     tB %end
//
// What we would like to achieve here is to replace the do-loop start pseudo
// instruction t2DoLoopStart with:
//
//   $lr = MVE_DLSTP_32 killed renamable $r3
//
// Thus, $r3, which defines the number of elements, is written to $lr,
// and then we want to delete the whole chain that used to define $lr;
// see the comment just below for an illustration of what this chain could
// look like.
//
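// Purely as an illustrative sketch (register names and immediates are made
// up), a chain that rounds the element count up into an iteration count
// might be:
//   $r1 = t2ADDri $r3, 3, ...   ; elements + 3
//   $r1 = t2BICri $r1, 3, ...   ; round down to a multiple of 4
//   $lr = t2LSRri $r1, 2, ...   ; divide by the number of lanes
//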
void ARMLowOverheadLoops::IterationCountDCE(LowOverheadLoop &LoLoop) {
  if (!LoLoop.IsTailPredicationLegal())
    return;

  LLVM_DEBUG(dbgs() << "ARM Loops: Trying DCE on loop iteration count.\n");

  MachineInstr *Def = RDA->getMIOperand(LoLoop.Start, 0);
  if (!Def) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Couldn't find iteration count.\n");
    return;
  }

  // Collect and remove the users of iteration count.
  SmallPtrSet<MachineInstr*, 4> Killed = { LoLoop.Start, LoLoop.Dec,
                                           LoLoop.End, LoLoop.InsertPt };
  SmallPtrSet<MachineInstr*, 2> Remove;
  if (RDA->isSafeToRemove(Def, Remove, Killed))
    LoLoop.ToRemove.insert(Remove.begin(), Remove.end());
  else {
    LLVM_DEBUG(dbgs() << "ARM Loops: Unsafe to remove loop iteration count.\n");
    return;
  }

  // Collect the dead code and the MBBs in which they reside.
  RDA->collectKilledOperands(Def, Killed);
  SmallPtrSet<MachineBasicBlock*, 2> BasicBlocks;
  for (auto *MI : Killed)
    BasicBlocks.insert(MI->getParent());

  // Collect IT blocks in all affected basic blocks.
  std::map<MachineInstr *, SmallPtrSet<MachineInstr *, 2>> ITBlocks;
  for (auto *MBB : BasicBlocks) {
    for (auto &MI : *MBB) {
      if (MI.getOpcode() != ARM::t2IT)
        continue;
      RDA->getReachingLocalUses(&MI, ARM::ITSTATE, ITBlocks[&MI]);
    }
  }

  // If we're removing all of the instructions within an IT block, then
  // also remove the IT instruction.
  SmallPtrSet<MachineInstr*, 2> ModifiedITs;
  for (auto *MI : Killed) {
    if (MachineOperand *MO = MI->findRegisterUseOperand(ARM::ITSTATE)) {
      MachineInstr *IT = RDA->getMIOperand(MI, *MO);
      auto &CurrentBlock = ITBlocks[IT];
      CurrentBlock.erase(MI);
      if (CurrentBlock.empty())
        ModifiedITs.erase(IT);
      else
        ModifiedITs.insert(IT);
    }
  }

  // Delete the killed instructions only if we don't have any IT blocks that
  // need to be modified because we need to fixup the mask.
  // TODO: Handle cases where IT blocks are modified.
  if (ModifiedITs.empty()) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Will remove iteration count:\n";
               for (auto *MI : Killed)
                 dbgs() << " - " << *MI);
    LoLoop.ToRemove.insert(Killed.begin(), Killed.end());
  } else
    LLVM_DEBUG(dbgs() << "ARM Loops: Would need to modify IT block(s).\n");
}

MachineInstr* ARMLowOverheadLoops::ExpandLoopStart(LowOverheadLoop &LoLoop) {
  LLVM_DEBUG(dbgs() << "ARM Loops: Expanding LoopStart.\n");
  // When using tail-predication, try to delete the dead code that was used to
  // calculate the number of loop iterations.
  IterationCountDCE(LoLoop);

  MachineInstr *InsertPt = LoLoop.InsertPt;
  MachineInstr *Start = LoLoop.Start;
  MachineBasicBlock *MBB = InsertPt->getParent();
  bool IsDo = Start->getOpcode() == ARM::t2DoLoopStart;
  unsigned Opc = LoLoop.getStartOpcode();
  MachineOperand &Count = LoLoop.getLoopStartOperand();

  MachineInstrBuilder MIB =
    BuildMI(*MBB, InsertPt, InsertPt->getDebugLoc(), TII->get(Opc));

  MIB.addDef(ARM::LR);
  MIB.add(Count);
  if (!IsDo)
    MIB.add(Start->getOperand(1));

  // If we're inserting at a mov lr, then remove it as it's redundant.
  if (InsertPt != Start)
    LoLoop.ToRemove.insert(InsertPt);
  LoLoop.ToRemove.insert(Start);
  LLVM_DEBUG(dbgs() << "ARM Loops: Inserted start: " << *MIB);
  return &*MIB;
}

void ARMLowOverheadLoops::FixupReductions(LowOverheadLoop &LoLoop) const {
  LLVM_DEBUG(dbgs() << "ARM Loops: Fixing up reduction(s).\n");
  auto BuildMov = [this](MachineInstr &InsertPt, Register To, Register From) {
    MachineBasicBlock *MBB = InsertPt.getParent();
    MachineInstrBuilder MIB =
      BuildMI(*MBB, &InsertPt, InsertPt.getDebugLoc(), TII->get(ARM::MVE_VORR));
    MIB.addDef(To);
    MIB.addReg(From);
    MIB.addReg(From);
    MIB.addImm(0);
    MIB.addReg(0);
    MIB.addReg(To);
    LLVM_DEBUG(dbgs() << "ARM Loops: Inserted VMOV: " << *MIB);
  };
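
  // For illustration only (opcode spellings approximate): a reduction found
  // by FindValidReduction is assumed to consist of a Copy of the previous
  // accumulator, e.g.
  //   $q1 = MVE_VORR $q0, $q0, ...
  // a tail-predicated Reduce, e.g.
  //   $q0 = MVE_VADDi32 $q2, $q3, ...
  // and a VPSEL in the exit block selecting lanes from iterations n and n-1.
  // Below, the Reduce is rewritten to accumulate straight into the register
  // the Copy used to define, making both the Copy and the VPSEL redundant.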

  for (auto &Reduction : LoLoop.Reductions) {
    MachineInstr &Copy = Reduction->Copy;
    MachineInstr &Reduce = Reduction->Reduce;
    Register DestReg = Copy.getOperand(0).getReg();

    // Change the initialiser if present
    if (Reduction->Init) {
      MachineInstr *Init = Reduction->Init;

      for (unsigned i = 0; i < Init->getNumOperands(); ++i) {
        MachineOperand &MO = Init->getOperand(i);
        if (MO.isReg() && MO.isUse() && MO.isTied() &&
            Init->findTiedOperandIdx(i) == 0)
          Init->getOperand(i).setReg(DestReg);
      }
      Init->getOperand(0).setReg(DestReg);
      LLVM_DEBUG(dbgs() << "ARM Loops: Changed init regs: " << *Init);
    } else
      BuildMov(LoLoop.Preheader->instr_back(), DestReg,
               Copy.getOperand(1).getReg());

    // Change the reducing op to write to the register that is used to copy
    // its value on the next iteration. Also update the tied-def operand.
    Reduce.getOperand(0).setReg(DestReg);
    Reduce.getOperand(5).setReg(DestReg);
    LLVM_DEBUG(dbgs() << "ARM Loops: Changed reduction regs: " << Reduce);

    // Instead of a vpsel, just copy the register into the necessary one.
    MachineInstr &VPSEL = Reduction->VPSEL;
    if (VPSEL.getOperand(0).getReg() != DestReg)
      BuildMov(VPSEL, VPSEL.getOperand(0).getReg(), DestReg);

    // Remove the unnecessary instructions.
    LLVM_DEBUG(dbgs() << "ARM Loops: Removing:\n"
                      << " - " << Copy
                      << " - " << VPSEL << "\n");
    Copy.eraseFromParent();
    VPSEL.eraseFromParent();
  }
}

void ARMLowOverheadLoops::ConvertVPTBlocks(LowOverheadLoop &LoLoop) {
  auto RemovePredicate = [](MachineInstr *MI) {
    LLVM_DEBUG(dbgs() << "ARM Loops: Removing predicate from: " << *MI);
    if (int PIdx = llvm::findFirstVPTPredOperandIdx(*MI)) {
      assert(MI->getOperand(PIdx).getImm() == ARMVCC::Then &&
             "Expected Then predicate!");
      MI->getOperand(PIdx).setImm(ARMVCC::None);
      MI->getOperand(PIdx+1).setReg(0);
    } else
      llvm_unreachable("trying to unpredicate a non-predicated instruction");
  };

  // There are a few scenarios which we have to fix up:
  // 1. VPT Blocks with non-uniform predicates:
  //    - a. When the divergent instruction is a vctp
  //    - b. When the block uses a vpst, and is only predicated on the vctp
  //    - c. When the block uses a vpt and (optionally) contains one or more
  //         vctp.
  // 2. VPT Blocks with uniform predicates:
  //    - a. The block uses a vpst, and is only predicated on the vctp
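  //
  // As an illustrative sketch of scenario 2.a (opcode spellings approximate),
  // a block such as:
  //   MVE_VPST
  //   $q0 = MVE_VLDRWU32 ..., $vpr
  //   $q1 = MVE_VADDi32  ..., $vpr
  // that is only predicated on the loop vctp simply loses its vpst and the
  // predicate operands of its instructions, since the LETP now provides the
  // implicit predication.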
  for (auto &Block : LoLoop.getVPTBlocks()) {
    SmallVectorImpl<PredicatedMI> &Insts = Block.getInsts();
    if (Block.HasNonUniformPredicate()) {
      PredicatedMI *Divergent = Block.getDivergent();
      if (isVCTP(Divergent->MI)) {
        // The vctp will be removed, so the block mask of the vp(s)t will need
        // to be recomputed.
        LoLoop.BlockMasksToRecompute.insert(Block.getPredicateThen());
      } else if (Block.isVPST() && Block.IsOnlyPredicatedOn(LoLoop.VCTP)) {
        // The VPT block has a non-uniform predicate but it uses a vpst and its
        // entry is guarded only by a vctp, which means we:
        // - Need to remove the original vpst.
        // - Then need to unpredicate any following instructions, until
        //   we come across the divergent vpr def.
        // - Insert a new vpst to predicate the instruction(s) that follow
        //   the divergent vpr def.
        // TODO: We could be producing more VPT blocks than necessary and could
        //       fold the newly created one into a preceding one.
        for (auto I = ++MachineBasicBlock::iterator(Block.getPredicateThen()),
             E = ++MachineBasicBlock::iterator(Divergent->MI); I != E; ++I)
          RemovePredicate(&*I);

        unsigned Size = 0;
        auto E = MachineBasicBlock::reverse_iterator(Divergent->MI);
        auto I = MachineBasicBlock::reverse_iterator(Insts.back().MI);
        MachineInstr *InsertAt = nullptr;
        while (I != E) {
          InsertAt = &*I;
          ++Size;
          ++I;
        }
        // Create a VPST (with a null mask for now, we'll recompute it later).
        MachineInstrBuilder MIB = BuildMI(*InsertAt->getParent(), InsertAt,
                                          InsertAt->getDebugLoc(),
                                          TII->get(ARM::MVE_VPST));
        MIB.addImm(0);
        LLVM_DEBUG(dbgs() << "ARM Loops: Removing VPST: "
                          << *Block.getPredicateThen());
        LLVM_DEBUG(dbgs() << "ARM Loops: Created VPST: " << *MIB);
        LoLoop.ToRemove.insert(Block.getPredicateThen());
        LoLoop.BlockMasksToRecompute.insert(MIB.getInstr());
      }
      // Else, if the block uses a vpt, iterate over the block, removing the
      // extra VCTPs it may contain.
      else if (Block.isVPT()) {
        bool RemovedVCTP = false;
        for (PredicatedMI &Elt : Block.getInsts()) {
          MachineInstr *MI = Elt.MI;
          if (isVCTP(MI)) {
            LLVM_DEBUG(dbgs() << "ARM Loops: Removing VCTP: " << *MI);
            LoLoop.ToRemove.insert(MI);
            RemovedVCTP = true;
            continue;
          }
        }
        if (RemovedVCTP)
          LoLoop.BlockMasksToRecompute.insert(Block.getPredicateThen());
      }
    } else if (Block.IsOnlyPredicatedOn(LoLoop.VCTP) && Block.isVPST()) {
      // A vpt block starting with a vpst that is only predicated upon the
      // vctp and has no internal vpr defs:
      // - Remove vpst.
      // - Unpredicate the remaining instructions.
      LLVM_DEBUG(dbgs() << "ARM Loops: Removing VPST: "
                        << *Block.getPredicateThen());
      LoLoop.ToRemove.insert(Block.getPredicateThen());
      for (auto &PredMI : Insts)
        RemovePredicate(PredMI.MI);
    }
  }
  LLVM_DEBUG(dbgs() << "ARM Loops: Removing remaining VCTPs...\n");
  // Remove the "main" VCTP
  LoLoop.ToRemove.insert(LoLoop.VCTP);
  LLVM_DEBUG(dbgs() << " " << *LoLoop.VCTP);
  // Remove remaining secondary VCTPs
  for (MachineInstr *VCTP : LoLoop.SecondaryVCTPs) {
    // All VCTPs that aren't marked for removal yet should be unpredicated ones.
    // The predicated ones should have already been marked for removal when
    // visiting the VPT blocks.
    if (LoLoop.ToRemove.insert(VCTP).second) {
      assert(getVPTInstrPredicate(*VCTP) == ARMVCC::None &&
             "Removing Predicated VCTP without updating the block mask!");
      LLVM_DEBUG(dbgs() << " " << *VCTP);
    }
  }
}

void ARMLowOverheadLoops::Expand(LowOverheadLoop &LoLoop) {

  // Combine the LoopDec and LoopEnd instructions into LE(TP).
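  // Illustrative sketch: a body ending in
  //   $lr = t2LoopDec $lr, 1
  //   t2LoopEnd $lr, %vector.body
  // becomes a single
  //   $lr = MVE_LETP $lr, %vector.body
  // (or t2LEUpdate when we are not tail-predicating).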
  auto ExpandLoopEnd = [this](LowOverheadLoop &LoLoop) {
    MachineInstr *End = LoLoop.End;
    MachineBasicBlock *MBB = End->getParent();
    unsigned Opc = LoLoop.IsTailPredicationLegal() ?
      ARM::MVE_LETP : ARM::t2LEUpdate;
    MachineInstrBuilder MIB = BuildMI(*MBB, End, End->getDebugLoc(),
                                      TII->get(Opc));
    MIB.addDef(ARM::LR);
    MIB.add(End->getOperand(0));
    MIB.add(End->getOperand(1));
    LLVM_DEBUG(dbgs() << "ARM Loops: Inserted LE: " << *MIB);
    LoLoop.ToRemove.insert(LoLoop.Dec);
    LoLoop.ToRemove.insert(End);
    return &*MIB;
  };

  // TODO: We should be able to automatically remove these branches before we
  // get here - probably by teaching analyzeBranch about the pseudo
  // instructions.
  // If there is an unconditional branch, after I, that just branches to the
  // next block, remove it.
  auto RemoveDeadBranch = [](MachineInstr *I) {
    MachineBasicBlock *BB = I->getParent();
    MachineInstr *Terminator = &BB->instr_back();
    if (Terminator->isUnconditionalBranch() && I != Terminator) {
      MachineBasicBlock *Succ = Terminator->getOperand(0).getMBB();
      if (BB->isLayoutSuccessor(Succ)) {
        LLVM_DEBUG(dbgs() << "ARM Loops: Removing branch: " << *Terminator);
        Terminator->eraseFromParent();
      }
    }
  };

  if (LoLoop.Revert) {
    if (LoLoop.Start->getOpcode() == ARM::t2WhileLoopStart)
      RevertWhile(LoLoop.Start);
    else
      LoLoop.Start->eraseFromParent();
    bool FlagsAlreadySet = RevertLoopDec(LoLoop.Dec);
    RevertLoopEnd(LoLoop.End, FlagsAlreadySet);
  } else {
    LoLoop.Start = ExpandLoopStart(LoLoop);
    RemoveDeadBranch(LoLoop.Start);
    LoLoop.End = ExpandLoopEnd(LoLoop);
    RemoveDeadBranch(LoLoop.End);
    if (LoLoop.IsTailPredicationLegal()) {
      ConvertVPTBlocks(LoLoop);
      FixupReductions(LoLoop);
    }
    for (auto *I : LoLoop.ToRemove) {
      LLVM_DEBUG(dbgs() << "ARM Loops: Erasing " << *I);
      I->eraseFromParent();
    }
    for (auto *I : LoLoop.BlockMasksToRecompute) {
      LLVM_DEBUG(dbgs() << "ARM Loops: Recomputing VPT/VPST Block Mask: " << *I);
      recomputeVPTBlockMask(*I);
      LLVM_DEBUG(dbgs() << " ... done: " << *I);
    }
  }

  PostOrderLoopTraversal DFS(LoLoop.ML, *MLI);
  DFS.ProcessLoop();
  const SmallVectorImpl<MachineBasicBlock*> &PostOrder = DFS.getOrder();
  for (auto *MBB : PostOrder) {
    recomputeLiveIns(*MBB);
    // FIXME: For some reason, the live-in print order is non-deterministic for
    // our tests and I can't work out why... So just sort them.
    MBB->sortUniqueLiveIns();
  }

  for (auto *MBB : reverse(PostOrder))
    recomputeLivenessFlags(*MBB);

  // We've moved, removed and inserted new instructions, so update RDA.
  RDA->reset();
}

bool ARMLowOverheadLoops::RevertNonLoops() {
  LLVM_DEBUG(dbgs() << "ARM Loops: Reverting any remaining pseudos...\n");
  bool Changed = false;

  for (auto &MBB : *MF) {
    SmallVector<MachineInstr*, 4> Starts;
    SmallVector<MachineInstr*, 4> Decs;
    SmallVector<MachineInstr*, 4> Ends;

    for (auto &I : MBB) {
      if (isLoopStart(I))
        Starts.push_back(&I);
      else if (I.getOpcode() == ARM::t2LoopDec)
        Decs.push_back(&I);
      else if (I.getOpcode() == ARM::t2LoopEnd)
        Ends.push_back(&I);
    }

    if (Starts.empty() && Decs.empty() && Ends.empty())
      continue;

    Changed = true;

    for (auto *Start : Starts) {
      if (Start->getOpcode() == ARM::t2WhileLoopStart)
        RevertWhile(Start);
      else
        Start->eraseFromParent();
    }
    for (auto *Dec : Decs)
      RevertLoopDec(Dec);

    for (auto *End : Ends)
      RevertLoopEnd(End);
  }
  return Changed;
}

FunctionPass *llvm::createARMLowOverheadLoopsPass() {
  return new ARMLowOverheadLoops();
}
|