//=- AArch64RedundantCopyElimination.cpp - Remove useless copy for AArch64 -=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
// This pass removes unnecessary copies/moves in BBs based on a dominating
// condition.
//
// We handle three cases:
// 1. For BBs that are targets of CBZ/CBNZ instructions, we know the value of
//    the CBZ/CBNZ source register is zero on the taken/not-taken path. For
//    instance, the copy instruction in the code below can be removed because
//    the CBZW jumps to %bb.2 when w0 is zero.
//
//  %bb.1:
//    cbz w0, .LBB0_2
//  .LBB0_2:
//    mov w0, wzr  ; <-- redundant
//
// 2. If the flag setting instruction defines a register other than WZR/XZR, we
//    can remove a zero copy in some cases.
//
//  %bb.0:
//    subs w0, w1, w2
//    str w0, [x1]
//    b.ne .LBB0_2
//  %bb.1:
//    mov w0, wzr  ; <-- redundant
//    str w0, [x2]
//  .LBB0_2
//
// 3. Finally, if the flag setting instruction is a comparison against a
//    constant (i.e., ADDS[W|X]ri, SUBS[W|X]ri), we can remove a mov immediate
//    in some cases.
//
//  %bb.0:
//    subs xzr, x0, #1
//    b.eq .LBB0_1
//  .LBB0_1:
//    orr x0, xzr, #0x1  ; <-- redundant
//
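//    (Here "orr x0, xzr, #0x1" is how "mov x0, #1" is materialized, so the
//    compare against #1 already pins x0's value on the taken path.)
//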
// This pass should be run after register allocation.
//
// FIXME: This could also be extended to check the whole dominance subtree below
// the comparison if the compile time regression is acceptable.
//
// FIXME: Add support for handling CCMP instructions.
// FIXME: If the known register value is zero, we should be able to rewrite uses
// to use WZR/XZR directly in some cases.
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-copyelim"

STATISTIC(NumCopiesRemoved, "Number of copies removed.");

namespace {
class AArch64RedundantCopyElimination : public MachineFunctionPass {
  const MachineRegisterInfo *MRI;
  const TargetRegisterInfo *TRI;

  // DomBBClobberedRegs is used when computing known values in the dominating
  // BB.
  BitVector DomBBClobberedRegs;

  // OptBBClobberedRegs is used when optimizing away redundant copies/moves.
  BitVector OptBBClobberedRegs;

public:
  static char ID;
  AArch64RedundantCopyElimination() : MachineFunctionPass(ID) {
    initializeAArch64RedundantCopyEliminationPass(
        *PassRegistry::getPassRegistry());
  }

  struct RegImm {
    MCPhysReg Reg;
    int32_t Imm;
    RegImm(MCPhysReg Reg, int32_t Imm) : Reg(Reg), Imm(Imm) {}
  };

  bool knownRegValInBlock(MachineInstr &CondBr, MachineBasicBlock *MBB,
                          SmallVectorImpl<RegImm> &KnownRegs,
                          MachineBasicBlock::iterator &FirstUse);
  bool optimizeBlock(MachineBasicBlock *MBB);
  bool runOnMachineFunction(MachineFunction &MF) override;
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }
  StringRef getPassName() const override {
    return "AArch64 Redundant Copy Elimination";
  }
};
char AArch64RedundantCopyElimination::ID = 0;
}

INITIALIZE_PASS(AArch64RedundantCopyElimination, "aarch64-copyelim",
                "AArch64 redundant copy elimination pass", false, false)

/// Remember what registers the specified instruction modifies.
static void trackRegDefs(const MachineInstr &MI, BitVector &ClobberedRegs,
                         const TargetRegisterInfo *TRI) {
  for (const MachineOperand &MO : MI.operands()) {
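    // A regmask operand typically appears on calls: a single BL marks every
    // register not preserved by the callee as clobbered in one step.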
    if (MO.isRegMask()) {
      ClobberedRegs.setBitsNotInMask(MO.getRegMask());
      continue;
    }

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (!MO.isDef())
      continue;

    for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
      ClobberedRegs.set(*AI);
  }
}

/// It's possible to determine the value of a register based on a dominating
/// condition. To do so, this function checks to see if the basic block \p MBB
/// is the target of a conditional branch \p CondBr with an equality comparison.
/// If the branch is a CBZ/CBNZ, we know the value of its source operand is zero
/// in \p MBB for some cases. Otherwise, we find and inspect the NZCV setting
/// instruction (e.g., SUBS, ADDS). If this instruction defines a register
/// other than WZR/XZR, we know the value of the destination register is zero in
/// \p MBB for some cases. In addition, if the NZCV setting instruction is
/// comparing against a constant we know the other source register is equal to
/// the constant in \p MBB for some cases. If we find any constant values, push
/// a physical register and constant value pair onto the KnownRegs vector and
/// return true. Otherwise, return false if no known values were found.
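///
/// For example (a rough sketch, register choices hypothetical):
///
///   %bb.0:
///     subs w9, w8, #1
///     b.eq .LBB0_1
///   .LBB0_1:
///
/// On entry to .LBB0_1 we can record both w9 == 0 (the SUBS result) and
/// w8 == 1 (the compared-against constant).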
bool AArch64RedundantCopyElimination::knownRegValInBlock(
    MachineInstr &CondBr, MachineBasicBlock *MBB,
    SmallVectorImpl<RegImm> &KnownRegs, MachineBasicBlock::iterator &FirstUse) {
  unsigned Opc = CondBr.getOpcode();

  // Check if the current basic block is the target block to which the
  // CBZ/CBNZ instruction jumps when its Wt/Xt is zero.
  if (((Opc == AArch64::CBZW || Opc == AArch64::CBZX) &&
       MBB == CondBr.getOperand(1).getMBB()) ||
      ((Opc == AArch64::CBNZW || Opc == AArch64::CBNZX) &&
       MBB != CondBr.getOperand(1).getMBB())) {
    FirstUse = CondBr;
    KnownRegs.push_back(RegImm(CondBr.getOperand(0).getReg(), 0));
    return true;
  }

  // Otherwise, must be a conditional branch.
  if (Opc != AArch64::Bcc)
    return false;

  // Must be an equality check (i.e., == or !=).
  AArch64CC::CondCode CC = (AArch64CC::CondCode)CondBr.getOperand(0).getImm();
  if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
    return false;

  MachineBasicBlock *BrTarget = CondBr.getOperand(1).getMBB();
  if ((CC == AArch64CC::EQ && BrTarget != MBB) ||
      (CC == AArch64CC::NE && BrTarget == MBB))
    return false;

  MachineBasicBlock *PredMBB = *MBB->pred_begin();
  assert(PredMBB == CondBr.getParent() &&
         "Conditional branch not in predecessor block!");
  // Stop if we get to the beginning of PredMBB.
  if (CondBr == PredMBB->begin())
    return false;

  // Registers clobbered in PredMBB between CondBr instruction and current
  // instruction being checked in loop.
  DomBBClobberedRegs.reset();

  // Find compare instruction that sets NZCV used by CondBr.
  MachineBasicBlock::reverse_iterator RIt = CondBr.getReverseIterator();
  for (MachineInstr &PredI : make_range(std::next(RIt), PredMBB->rend())) {
    bool IsCMN = false;
    switch (PredI.getOpcode()) {
    default:
      break;

    // CMN is an alias for ADDS with a dead destination register.
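    // (E.g., "cmn w8, #4" is "adds wzr, w8, #4"; if the EQ branch is taken,
    // then w8 + 4 == 0, so w8 is known to be -4, hence the negation below.)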
    case AArch64::ADDSWri:
    case AArch64::ADDSXri:
      IsCMN = true;
      LLVM_FALLTHROUGH;
    // CMP is an alias for SUBS with a dead destination register.
    case AArch64::SUBSWri:
    case AArch64::SUBSXri: {
      // Sometimes the first operand is a FrameIndex. Bail if that happens.
      if (!PredI.getOperand(1).isReg())
        return false;
      MCPhysReg DstReg = PredI.getOperand(0).getReg();
      MCPhysReg SrcReg = PredI.getOperand(1).getReg();

      bool Res = false;
      // If we're comparing against a non-symbolic immediate and the source
      // register of the compare is not modified (including a self-clobbering
      // compare) between the compare and the conditional branch, we know the
      // value of the 1st source operand.
      if (PredI.getOperand(2).isImm() && !DomBBClobberedRegs[SrcReg] &&
          SrcReg != DstReg) {
        // We've found the instruction that sets NZCV.
        int32_t KnownImm = PredI.getOperand(2).getImm();
        int32_t Shift = PredI.getOperand(3).getImm();
        KnownImm <<= Shift;
        if (IsCMN)
          KnownImm = -KnownImm;
        FirstUse = PredI;
        KnownRegs.push_back(RegImm(SrcReg, KnownImm));
        Res = true;
      }

      // If this instruction defines something other than WZR/XZR, we know its
      // result is zero in some cases.
      if (DstReg == AArch64::WZR || DstReg == AArch64::XZR)
        return Res;

      // The destination register must not be modified between the NZCV setting
      // instruction and the conditional branch.
      if (DomBBClobberedRegs[DstReg])
        return Res;

      FirstUse = PredI;
      KnownRegs.push_back(RegImm(DstReg, 0));
      return true;
    }

    // Look for NZCV setting instructions that define something other than
    // WZR/XZR.
    case AArch64::ADCSWr:
    case AArch64::ADCSXr:
    case AArch64::ADDSWrr:
    case AArch64::ADDSWrs:
    case AArch64::ADDSWrx:
    case AArch64::ADDSXrr:
    case AArch64::ADDSXrs:
    case AArch64::ADDSXrx:
    case AArch64::ADDSXrx64:
    case AArch64::ANDSWri:
    case AArch64::ANDSWrr:
    case AArch64::ANDSWrs:
    case AArch64::ANDSXri:
    case AArch64::ANDSXrr:
    case AArch64::ANDSXrs:
    case AArch64::BICSWrr:
    case AArch64::BICSWrs:
    case AArch64::BICSXrs:
    case AArch64::BICSXrr:
    case AArch64::SBCSWr:
    case AArch64::SBCSXr:
    case AArch64::SUBSWrr:
    case AArch64::SUBSWrs:
    case AArch64::SUBSWrx:
    case AArch64::SUBSXrr:
    case AArch64::SUBSXrs:
    case AArch64::SUBSXrx:
    case AArch64::SUBSXrx64: {
      MCPhysReg DstReg = PredI.getOperand(0).getReg();
      if (DstReg == AArch64::WZR || DstReg == AArch64::XZR)
        return false;

      // The destination register of the NZCV setting instruction must not be
      // modified before the conditional branch.
      if (DomBBClobberedRegs[DstReg])
        return false;

      // We've found the instruction that sets NZCV whose DstReg == 0.
      FirstUse = PredI;
      KnownRegs.push_back(RegImm(DstReg, 0));
      return true;
    }
    }

    // Bail if we see an instruction that defines NZCV that we don't handle.
    if (PredI.definesRegister(AArch64::NZCV))
      return false;

    // Track clobbered registers.
    trackRegDefs(PredI, DomBBClobberedRegs, TRI);
  }
  return false;
}

bool AArch64RedundantCopyElimination::optimizeBlock(MachineBasicBlock *MBB) {
  // Check if the current basic block has a single predecessor.
  if (MBB->pred_size() != 1)
    return false;

  // Check if the predecessor has two successors, implying the block ends in a
  // conditional branch.
  MachineBasicBlock *PredMBB = *MBB->pred_begin();
  if (PredMBB->succ_size() != 2)
    return false;

  MachineBasicBlock::iterator CondBr = PredMBB->getLastNonDebugInstr();
  if (CondBr == PredMBB->end())
    return false;

  // Keep track of the earliest point in the PredMBB block where kill markers
  // need to be removed if a COPY is removed.
  MachineBasicBlock::iterator FirstUse;
  // After calling knownRegValInBlock, FirstUse will either point to a CBZ/CBNZ
  // or a compare (i.e., SUBS). In the latter case, we must take care when
  // updating FirstUse when scanning for COPY instructions. In particular, if
  // there's a COPY in between the compare and branch the COPY should not
  // update FirstUse.
  bool SeenFirstUse = false;
  // Registers that contain a known value at the start of MBB.
  SmallVector<RegImm, 4> KnownRegs;

  MachineBasicBlock::iterator Itr = std::next(CondBr);
  do {
    --Itr;

    if (!knownRegValInBlock(*Itr, MBB, KnownRegs, FirstUse))
      continue;

    // Reset the clobber list.
    OptBBClobberedRegs.reset();

    // Look backward in PredMBB for COPYs from the known reg to find other
    // registers that are known to be a constant value.
    for (auto PredI = Itr;; --PredI) {
      if (FirstUse == PredI)
        SeenFirstUse = true;

      if (PredI->isCopy()) {
        MCPhysReg CopyDstReg = PredI->getOperand(0).getReg();
        MCPhysReg CopySrcReg = PredI->getOperand(1).getReg();
        for (auto &KnownReg : KnownRegs) {
          if (OptBBClobberedRegs[KnownReg.Reg])
            continue;
          // If we have X = COPY Y, and Y is known to be zero, then now X is
          // known to be zero.
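          // (This is the simple copy propagation case from the commit notes:
          //    %W0 = COPY %W1
          //    CBZW %W1, <BB#1>
          //  lets BB#1 treat %W0 as zero as well.)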
          if (CopySrcReg == KnownReg.Reg && !OptBBClobberedRegs[CopyDstReg]) {
            KnownRegs.push_back(RegImm(CopyDstReg, KnownReg.Imm));
            if (SeenFirstUse)
              FirstUse = PredI;
            break;
          }
          // If we have X = COPY Y, and X is known to be zero, then now Y is
          // known to be zero.
          if (CopyDstReg == KnownReg.Reg && !OptBBClobberedRegs[CopySrcReg]) {
            KnownRegs.push_back(RegImm(CopySrcReg, KnownReg.Imm));
            if (SeenFirstUse)
              FirstUse = PredI;
            break;
          }
        }
      }

      // Stop if we get to the beginning of PredMBB.
      if (PredI == PredMBB->begin())
        break;

      trackRegDefs(*PredI, OptBBClobberedRegs, TRI);
      // Stop if all of the known-zero regs have been clobbered.
      if (all_of(KnownRegs, [&](RegImm KnownReg) {
            return OptBBClobberedRegs[KnownReg.Reg];
          }))
        break;
    }
    break;

  } while (Itr != PredMBB->begin() && Itr->isTerminator());

  // If we've not found any registers with a known value, bail out.
  if (KnownRegs.empty())
    return false;

  bool Changed = false;
  // UsedKnownRegs is the set of KnownRegs that have had uses added to MBB.
  SmallSetVector<unsigned, 4> UsedKnownRegs;
  MachineBasicBlock::iterator LastChange = MBB->begin();
  // Remove redundant copy/move instructions unless KnownReg is modified.
  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;) {
    MachineInstr *MI = &*I;
    ++I;
    bool RemovedMI = false;
    bool IsCopy = MI->isCopy();
    bool IsMoveImm = MI->isMoveImmediate();
    if (IsCopy || IsMoveImm) {
      MCPhysReg DefReg = MI->getOperand(0).getReg();
      MCPhysReg SrcReg = IsCopy ? MI->getOperand(1).getReg() : 0;
      int64_t SrcImm = IsMoveImm ? MI->getOperand(1).getImm() : 0;
      if (!MRI->isReserved(DefReg) &&
          ((IsCopy && (SrcReg == AArch64::XZR || SrcReg == AArch64::WZR)) ||
           IsMoveImm)) {
        for (RegImm &KnownReg : KnownRegs) {
          if (KnownReg.Reg != DefReg &&
              !TRI->isSuperRegister(DefReg, KnownReg.Reg))
            continue;

          // For a copy, the known value must be a zero.
          if (IsCopy && KnownReg.Imm != 0)
            continue;

          if (IsMoveImm) {
            // For a move immediate, the known immediate must match the source
            // immediate.
            if (KnownReg.Imm != SrcImm)
              continue;

            // Don't remove a move immediate that implicitly defines the upper
            // bits when only the lower 32 bits are known.
            MCPhysReg CmpReg = KnownReg.Reg;
            if (any_of(MI->implicit_operands(), [CmpReg](MachineOperand &O) {
                  return !O.isDead() && O.isReg() && O.isDef() &&
                         O.getReg() != CmpReg;
                }))
              continue;
          }

          if (IsCopy)
            DEBUG(dbgs() << "Remove redundant Copy : " << *MI);
          else
            DEBUG(dbgs() << "Remove redundant Move : " << *MI);

          MI->eraseFromParent();
          Changed = true;
          LastChange = I;
          NumCopiesRemoved++;
          UsedKnownRegs.insert(KnownReg.Reg);
          RemovedMI = true;
          break;
        }
      }
    }

    // Skip to the next instruction if we removed the COPY/MovImm.
    if (RemovedMI)
      continue;

    // Remove any regs the MI clobbers from the KnownRegs set.
    for (unsigned RI = 0; RI < KnownRegs.size();)
      if (MI->modifiesRegister(KnownRegs[RI].Reg, TRI)) {
        std::swap(KnownRegs[RI], KnownRegs[KnownRegs.size() - 1]);
        KnownRegs.pop_back();
        // Don't increment RI since we need to now check the swapped-in
        // KnownRegs[RI].
      } else {
        ++RI;
      }

    // Continue until the KnownRegs set is empty.
    if (KnownRegs.empty())
      break;
  }

  if (!Changed)
    return false;

  // Add newly used regs to the block's live-in list if they aren't there
  // already.
  for (MCPhysReg KnownReg : UsedKnownRegs)
    if (!MBB->isLiveIn(KnownReg))
      MBB->addLiveIn(KnownReg);

  // Clear kills in the range where changes were made. This is conservative,
  // but should be okay since kill markers are being phased out.
  DEBUG(dbgs() << "Clearing kill flags.\n\tFirstUse: " << *FirstUse
               << "\tLastChange: " << *LastChange);
  for (MachineInstr &MMI : make_range(FirstUse, PredMBB->end()))
    MMI.clearKillInfo();
  for (MachineInstr &MMI : make_range(MBB->begin(), LastChange))
    MMI.clearKillInfo();

  return true;
}

bool AArch64RedundantCopyElimination::runOnMachineFunction(
    MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;
  TRI = MF.getSubtarget().getRegisterInfo();
  MRI = &MF.getRegInfo();

  // Resize the clobber register bitfield trackers. We do this once per
  // function.
  DomBBClobberedRegs.resize(TRI->getNumRegs());
  OptBBClobberedRegs.resize(TRI->getNumRegs());

  bool Changed = false;
  for (MachineBasicBlock &MBB : MF)
    Changed |= optimizeBlock(&MBB);
  return Changed;
}

FunctionPass *llvm::createAArch64RedundantCopyEliminationPass() {
  return new AArch64RedundantCopyElimination();
}
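
// The factory above is what the backend pipeline calls to schedule this pass
// after register allocation (via the AArch64 pass config; the exact hook name
// is an assumption here, likely AArch64PassConfig::addPostRegAlloc()).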