//===- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file This file contains a pass that performs load / store related peephole
/// optimizations. This pass should be run after register allocation.
//
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
#include "ARM.h"
|
2009-08-08 11:20:32 +08:00
|
|
|
#include "ARMBaseInstrInfo.h"
|
2012-03-26 08:45:15 +08:00
|
|
|
#include "ARMBaseRegisterInfo.h"
|
2014-05-16 22:14:30 +08:00
|
|
|
#include "ARMISelLowering.h"
|
2007-03-08 04:30:36 +08:00
|
|
|
#include "ARMMachineFunctionInfo.h"
|
2014-03-23 07:51:00 +08:00
|
|
|
#include "ARMSubtarget.h"
|
2011-07-21 07:34:39 +08:00
|
|
|
#include "MCTargetDesc/ARMAddressingModes.h"
|
2017-09-21 05:35:51 +08:00
|
|
|
#include "MCTargetDesc/ARMBaseInfo.h"
|
|
|
|
#include "Utils/ARMBaseInfo.h"
|
|
|
|
#include "llvm/ADT/ArrayRef.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2017-09-21 05:35:51 +08:00
|
|
|
#include "llvm/ADT/DenseSet.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
|
|
|
#include "llvm/ADT/SmallSet.h"
|
|
|
|
#include "llvm/ADT/SmallVector.h"
|
|
|
|
#include "llvm/ADT/Statistic.h"
|
2017-09-21 05:35:51 +08:00
|
|
|
#include "llvm/ADT/iterator_range.h"
|
|
|
|
#include "llvm/Analysis/AliasAnalysis.h"
|
2017-06-06 19:49:48 +08:00
|
|
|
#include "llvm/CodeGen/LivePhysRegs.h"
|
2007-01-19 15:51:42 +08:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
2017-09-21 05:35:51 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2007-01-19 15:51:42 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2017-09-21 05:35:51 +08:00
|
|
|
#include "llvm/CodeGen/MachineMemOperand.h"
|
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2009-06-13 17:12:55 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2015-07-11 02:08:49 +08:00
|
|
|
#include "llvm/CodeGen/RegisterClassInfo.h"
|
2017-11-08 09:01:31 +08:00
|
|
|
#include "llvm/CodeGen/TargetFrameLowering.h"
|
|
|
|
#include "llvm/CodeGen/TargetInstrInfo.h"
|
2017-11-17 09:07:10 +08:00
|
|
|
#include "llvm/CodeGen/TargetLowering.h"
|
|
|
|
#include "llvm/CodeGen/TargetRegisterInfo.h"
|
|
|
|
#include "llvm/CodeGen/TargetSubtargetInfo.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/DataLayout.h"
|
2017-11-08 09:01:31 +08:00
|
|
|
#include "llvm/IR/DebugLoc.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
|
|
|
#include "llvm/IR/Function.h"
|
2017-09-21 05:35:51 +08:00
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
#include "llvm/MC/MCInstrDesc.h"
|
|
|
|
#include "llvm/Pass.h"
|
2015-07-11 02:08:49 +08:00
|
|
|
#include "llvm/Support/Allocator.h"
|
2017-09-21 05:35:51 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2015-03-24 03:32:43 +08:00
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2017-09-21 05:35:51 +08:00
|
|
|
#include <algorithm>
|
|
|
|
#include <cassert>
|
|
|
|
#include <cstddef>
|
|
|
|
#include <cstdlib>
|
|
|
|
#include <iterator>
|
|
|
|
#include <limits>
|
|
|
|
#include <utility>
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2014-04-22 10:41:26 +08:00
|
|
|
#define DEBUG_TYPE "arm-ldst-opt"
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
STATISTIC(NumLDMGened , "Number of ldm instructions generated");
|
|
|
|
STATISTIC(NumSTMGened , "Number of stm instructions generated");
|
2009-11-09 08:11:35 +08:00
|
|
|
STATISTIC(NumVLDMGened, "Number of vldm instructions generated");
|
|
|
|
STATISTIC(NumVSTMGened, "Number of vstm instructions generated");
|
2009-06-13 17:12:55 +08:00
|
|
|
STATISTIC(NumLdStMoved, "Number of load / store instructions moved");
|
2009-06-18 10:04:01 +08:00
|
|
|
STATISTIC(NumLDRDFormed,"Number of ldrd created before allocation");
|
|
|
|
STATISTIC(NumSTRDFormed,"Number of strd created before allocation");
|
|
|
|
STATISTIC(NumLDRD2LDM, "Number of ldrd instructions turned back into ldm");
|
|
|
|
STATISTIC(NumSTRD2STM, "Number of strd instructions turned back into stm");
|
|
|
|
STATISTIC(NumLDRD2LDR, "Number of ldrd instructions turned back into ldr's");
|
|
|
|
STATISTIC(NumSTRD2STR, "Number of strd instructions turned back into str's");
|
2009-06-13 17:12:55 +08:00
|
|
|
|
2016-03-03 03:20:00 +08:00
|
|
|
/// This switch disables formation of double/multi instructions that could
|
|
|
|
/// potentially lead to (new) alignment traps even with CCR.UNALIGN_TRP
|
|
|
|
/// disabled. This can be used to create libraries that are robust even when
|
|
|
|
/// users provoke undefined behaviour by supplying misaligned pointers.
|
|
|
|
/// \see mayCombineMisaligned()
|
|
|
|
static cl::opt<bool>
|
|
|
|
AssumeMisalignedLoadStores("arm-assume-misaligned-load-store", cl::Hidden,
|
|
|
|
cl::init(false), cl::desc("Be more conservative in ARM load/store opt"));
|
|
|
|
|
2015-07-24 06:12:46 +08:00
|
|
|
#define ARM_LOAD_STORE_OPT_NAME "ARM load / store optimization pass"
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
namespace {
|
2017-09-21 05:35:51 +08:00
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// Post-register-allocation pass that combines load / store instructions to
/// form ldm / stm instructions.
struct ARMLoadStoreOpt : public MachineFunctionPass {
  static char ID;

  // Cached per-function/target objects, initialized in runOnMachineFunction.
  const MachineFunction *MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const ARMSubtarget *STI;
  const TargetLowering *TL;
  ARMFunctionInfo *AFI;

  // Lazily computed liveness state; see moveLiveRegsBefore() / findFreeReg().
  LivePhysRegs LiveRegs;
  RegisterClassInfo RegClassInfo;
  MachineBasicBlock::const_iterator LiveRegPos;
  bool LiveRegsValid;
  bool RegClassInfoValid;

  // Instruction-set flavour of the function being optimized.
  bool isThumb1, isThumb2;

  ARMLoadStoreOpt() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &Fn) override;

  // This pass runs after register allocation, so no virtual registers may
  // remain.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return ARM_LOAD_STORE_OPT_NAME; }

private:
  /// A set of load/store MachineInstrs with same base register sorted by
  /// offset.
  struct MemOpQueueEntry {
    MachineInstr *MI;
    int Offset;        ///< Load/Store offset.
    unsigned Position; ///< Position as counted from end of basic block.

    MemOpQueueEntry(MachineInstr &MI, int Offset, unsigned Position)
        : MI(&MI), Offset(Offset), Position(Position) {}
  };
  using MemOpQueue = SmallVector<MemOpQueueEntry, 8>;

  /// A set of MachineInstrs that fulfill (nearly all) conditions to get
  /// merged into a LDM/STM.
  struct MergeCandidate {
    /// List of instructions ordered by load/store offset.
    SmallVector<MachineInstr*, 4> Instrs;

    /// Index in Instrs of the instruction being latest in the schedule.
    unsigned LatestMIIdx;

    /// Index in Instrs of the instruction being earliest in the schedule.
    unsigned EarliestMIIdx;

    /// Index into the basic block where the merged instruction will be
    /// inserted. (See MemOpQueueEntry.Position)
    unsigned InsertPos;

    /// Whether the instructions can be merged into a ldm/stm instruction.
    bool CanMergeToLSMulti;

    /// Whether the instructions can be merged into a ldrd/strd instruction.
    bool CanMergeToLSDouble;
  };
  SpecificBumpPtrAllocator<MergeCandidate> Allocator;
  SmallVector<const MergeCandidate*,4> Candidates;
  SmallVector<MachineInstr*,4> MergeBaseCandidates;

  // Recompute liveness just before \p Before (queries within a block must
  // come in reverse order; see the definition for details).
  void moveLiveRegsBefore(const MachineBasicBlock &MBB,
                          MachineBasicBlock::const_iterator Before);
  // Return the first register of \p RegClass not currently live, or 0.
  unsigned findFreeReg(const TargetRegisterClass &RegClass);
  // Thumb1 only: fix up later users of \p Base after LDM/STM writeback.
  void UpdateBaseRegUses(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
                         unsigned Base, unsigned WordOffset,
                         ARMCC::CondCodes Pred, unsigned PredReg);
  // Emit a single LDM/STM (or nullptr if the merge is not profitable/legal).
  MachineInstr *CreateLoadStoreMulti(
      MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
      int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
      ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
      ArrayRef<std::pair<unsigned, bool>> Regs);
  // Emit a single LDRD/STRD for a two-register candidate.
  MachineInstr *CreateLoadStoreDouble(
      MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
      int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
      ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
      ArrayRef<std::pair<unsigned, bool>> Regs) const;
  // Partition a MemOpQueue into MergeCandidates (appended to Candidates).
  void FormCandidates(const MemOpQueue &MemOps);
  // Replace a candidate's instructions with the merged instruction.
  MachineInstr *MergeOpsUpdate(const MergeCandidate &Cand);
  // Split LDRD/STRD with an illegal register pair back into two accesses.
  bool FixInvalidRegPairOp(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MBBI);
  // Fold a preceding/following base add/sub into writeback forms.
  bool MergeBaseUpdateLoadStore(MachineInstr *MI);
  bool MergeBaseUpdateLSMultiple(MachineInstr *MI);
  bool MergeBaseUpdateLSDouble(MachineInstr &MI) const;
  // Main per-block driver for the LDM/STM formation.
  bool LoadStoreMultipleOpti(MachineBasicBlock &MBB);
  // Fold "ldmia sp!; bx lr"-style sequences into a popping LDM return.
  bool MergeReturnIntoLDM(MachineBasicBlock &MBB);
  // Fold a MOV into a following BX where beneficial.
  bool CombineMovBx(MachineBasicBlock &MBB);
};
|
2017-09-21 05:35:51 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
|
|
|
|
|
|
|
char ARMLoadStoreOpt::ID = 0;
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2016-07-16 10:24:10 +08:00
|
|
|
INITIALIZE_PASS(ARMLoadStoreOpt, "arm-ldst-opt", ARM_LOAD_STORE_OPT_NAME, false,
|
|
|
|
false)
|
2015-07-24 06:12:46 +08:00
|
|
|
|
2016-07-09 04:21:17 +08:00
|
|
|
static bool definesCPSR(const MachineInstr &MI) {
|
|
|
|
for (const auto &MO : MI.operands()) {
|
2014-09-25 00:35:50 +08:00
|
|
|
if (!MO.isReg())
|
|
|
|
continue;
|
|
|
|
if (MO.isDef() && MO.getReg() == ARM::CPSR && !MO.isDead())
|
|
|
|
// If the instruction has live CPSR def, then it's not safe to fold it
|
|
|
|
// into load / store.
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-07-09 04:21:17 +08:00
|
|
|
/// Decode the byte offset of load/store \p MI from its immediate operand,
/// returning a negative value when the addressing mode subtracts the offset.
static int getMemoryOpOffset(const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  // ARM LDRD/STRD use addrmode3; the VLDR/VSTR-style opcodes handled at the
  // bottom use addrmode5.
  bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
  // The offset immediate is always the operand just before the two
  // predicate operands.
  unsigned NumOperands = MI.getDesc().getNumOperands();
  unsigned OffField = MI.getOperand(NumOperands - 3).getImm();

  // These opcodes encode a plain (possibly signed) byte offset directly.
  if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
      Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
      Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8 ||
      Opcode == ARM::LDRi12 || Opcode == ARM::STRi12)
    return OffField;

  // Thumb1 immediate offsets are scaled by 4
  if (Opcode == ARM::tLDRi || Opcode == ARM::tSTRi ||
      Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi)
    return OffField * 4;

  // Remaining opcodes pack magnitude and add/sub direction into the
  // immediate; addrmode5 offsets are in words, hence the * 4.
  int Offset = isAM3 ? ARM_AM::getAM3Offset(OffField)
                     : ARM_AM::getAM5Offset(OffField) * 4;
  ARM_AM::AddrOpc Op = isAM3 ? ARM_AM::getAM3Op(OffField)
                             : ARM_AM::getAM5Op(OffField);

  if (Op == ARM_AM::sub)
    return -Offset;

  return Offset;
}
|
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
/// The base register of a single load/store is always operand 1.
static const MachineOperand &getLoadStoreBaseOp(const MachineInstr &MI) {
  const MachineOperand &BaseOperand = MI.getOperand(1);
  return BaseOperand;
}
|
|
|
|
|
|
|
|
/// The transferred (loaded/stored) register of a single load/store is
/// always operand 0.
static const MachineOperand &getLoadStoreRegOp(const MachineInstr &MI) {
  const MachineOperand &RegOperand = MI.getOperand(0);
  return RegOperand;
}
|
|
|
|
|
2015-05-19 04:27:55 +08:00
|
|
|
/// Map a single load/store opcode plus a desired addressing submode to the
/// matching load/store-multiple opcode, bumping the statistic counter for
/// the kind of multiple being formed. Returns 0 when the submode has no
/// corresponding non-writeback multiple (VFP db variants).
static int getLoadStoreMultipleOpcode(unsigned Opcode, ARM_AM::AMSubMode Mode) {
  switch (Opcode) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::LDRi12:
    ++NumLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::LDMIA;
    case ARM_AM::da: return ARM::LDMDA;
    case ARM_AM::db: return ARM::LDMDB;
    case ARM_AM::ib: return ARM::LDMIB;
    }
  case ARM::STRi12:
    ++NumSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::STMIA;
    case ARM_AM::da: return ARM::STMDA;
    case ARM_AM::db: return ARM::STMDB;
    case ARM_AM::ib: return ARM::STMIB;
    }
  case ARM::tLDRi:
  case ARM::tLDRspi:
    // tLDMIA is writeback-only - unless the base register is in the input
    // reglist.
    ++NumLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::tLDMIA;
    }
  case ARM::tSTRi:
  case ARM::tSTRspi:
    // There is no non-writeback tSTMIA either.
    ++NumSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::tSTMIA_UPD;
    }
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
    ++NumLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2LDMIA;
    case ARM_AM::db: return ARM::t2LDMDB;
    }
  case ARM::t2STRi8:
  case ARM::t2STRi12:
    ++NumSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2STMIA;
    case ARM_AM::db: return ARM::t2STMDB;
    }
  case ARM::VLDRS:
    ++NumVLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMSIA;
    case ARM_AM::db: return 0; // Only VLDMSDB_UPD exists.
    }
  case ARM::VSTRS:
    ++NumVSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMSIA;
    case ARM_AM::db: return 0; // Only VSTMSDB_UPD exists.
    }
  case ARM::VLDRD:
    ++NumVLDMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMDIA;
    case ARM_AM::db: return 0; // Only VLDMDDB_UPD exists.
    }
  case ARM::VSTRD:
    ++NumVSTMGened;
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMDIA;
    case ARM_AM::db: return 0; // Only VSTMDDB_UPD exists.
    }
  }
}
|
|
|
|
|
2015-06-05 22:32:54 +08:00
|
|
|
/// Return the addressing submode (increment/decrement, after/before) used by
/// the load/store-multiple opcode \p Opcode.
static ARM_AM::AMSubMode getLoadStoreMultipleSubMode(unsigned Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::LDMIA_RET:
  case ARM::LDMIA:
  case ARM::LDMIA_UPD:
  case ARM::STMIA:
  case ARM::STMIA_UPD:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::t2LDMIA_RET:
  case ARM::t2LDMIA:
  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA:
  case ARM::t2STMIA_UPD:
  case ARM::VLDMSIA:
  case ARM::VLDMSIA_UPD:
  case ARM::VSTMSIA:
  case ARM::VSTMSIA_UPD:
  case ARM::VLDMDIA:
  case ARM::VLDMDIA_UPD:
  case ARM::VSTMDIA:
  case ARM::VSTMDIA_UPD:
    return ARM_AM::ia;

  case ARM::LDMDA:
  case ARM::LDMDA_UPD:
  case ARM::STMDA:
  case ARM::STMDA_UPD:
    return ARM_AM::da;

  case ARM::LDMDB:
  case ARM::LDMDB_UPD:
  case ARM::STMDB:
  case ARM::STMDB_UPD:
  case ARM::t2LDMDB:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMDB:
  case ARM::t2STMDB_UPD:
  case ARM::VLDMSDB_UPD:
  case ARM::VSTMSDB_UPD:
  case ARM::VLDMDDB_UPD:
  case ARM::VSTMDDB_UPD:
    return ARM_AM::db;

  case ARM::LDMIB:
  case ARM::LDMIB_UPD:
  case ARM::STMIB:
  case ARM::STMIB_UPD:
    return ARM_AM::ib;
  }
}
|
|
|
|
|
2014-05-16 22:14:30 +08:00
|
|
|
static bool isT1i32Load(unsigned Opc) {
|
2015-02-25 22:41:06 +08:00
|
|
|
return Opc == ARM::tLDRi || Opc == ARM::tLDRspi;
|
2014-05-16 22:14:30 +08:00
|
|
|
}
|
|
|
|
|
2009-08-04 09:43:45 +08:00
|
|
|
static bool isT2i32Load(unsigned Opc) {
|
|
|
|
return Opc == ARM::t2LDRi12 || Opc == ARM::t2LDRi8;
|
|
|
|
}
|
|
|
|
|
2009-07-10 07:11:34 +08:00
|
|
|
static bool isi32Load(unsigned Opc) {
|
2014-05-16 22:14:30 +08:00
|
|
|
return Opc == ARM::LDRi12 || isT1i32Load(Opc) || isT2i32Load(Opc) ;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool isT1i32Store(unsigned Opc) {
|
2015-02-25 22:41:06 +08:00
|
|
|
return Opc == ARM::tSTRi || Opc == ARM::tSTRspi;
|
2009-08-04 09:43:45 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool isT2i32Store(unsigned Opc) {
|
|
|
|
return Opc == ARM::t2STRi12 || Opc == ARM::t2STRi8;
|
2009-07-10 07:11:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool isi32Store(unsigned Opc) {
|
2014-05-16 22:14:30 +08:00
|
|
|
return Opc == ARM::STRi12 || isT1i32Store(Opc) || isT2i32Store(Opc);
|
|
|
|
}
|
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
static bool isLoadSingle(unsigned Opc) {
|
|
|
|
return isi32Load(Opc) || Opc == ARM::VLDRS || Opc == ARM::VLDRD;
|
|
|
|
}
|
|
|
|
|
2014-09-25 00:35:50 +08:00
|
|
|
/// Return the factor that converts a count of words (4-byte units) into the
/// immediate-offset units of Thumb1 opcode \p Opc. Word accesses scale their
/// immediate by 4 (factor 1), halfword accesses by 2 (factor 2), and byte
/// accesses by 1 (factor 4); the result is the inverse of that scaling.
static unsigned getImmScale(unsigned Opc) {
  switch (Opc) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::tLDRi:
  case ARM::tSTRi:
  case ARM::tLDRspi:
  case ARM::tSTRspi:
    return 1;
  case ARM::tLDRHi:
  case ARM::tSTRHi:
    return 2;
  case ARM::tLDRBi:
  case ARM::tSTRBi:
    return 4;
  }
}
|
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
/// Return the number of bytes transferred by memory-access instruction
/// \p MI, or 0 for opcodes this pass does not recognize.
static unsigned getLSMultipleTransferSize(const MachineInstr *MI) {
  switch (MI->getOpcode()) {
  default: return 0;
  // Single 32-bit transfers.
  case ARM::LDRi12:
  case ARM::STRi12:
  case ARM::tLDRi:
  case ARM::tSTRi:
  case ARM::tLDRspi:
  case ARM::tSTRspi:
  case ARM::t2LDRi8:
  case ARM::t2LDRi12:
  case ARM::t2STRi8:
  case ARM::t2STRi12:
  case ARM::VLDRS:
  case ARM::VSTRS:
    return 4;
  // Single 64-bit transfers.
  case ARM::VLDRD:
  case ARM::VSTRD:
    return 8;
  // Load/store multiple: 4 bytes per register in the variadic register
  // list. The list length is the operand count beyond the fixed operands
  // (plus one, as the first list entry occupies a fixed operand slot).
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
  case ARM::tLDMIA:
  case ARM::tLDMIA_UPD:
  case ARM::tSTMIA_UPD:
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
  case ARM::t2STMIA:
  case ARM::t2STMDB:
  case ARM::VLDMSIA:
  case ARM::VSTMSIA:
    return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 4;
  // Double-precision VFP multiples: 8 bytes per listed register.
  case ARM::VLDMDIA:
  case ARM::VSTMDIA:
    return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 8;
  }
}
|
|
|
|
|
2014-09-25 00:35:50 +08:00
|
|
|
/// Update future uses of the base register with the offset introduced
/// due to writeback. This function only works on Thumb1.
///
/// Walks forward from \p MBBI rewriting immediate offsets of instructions
/// that read \p Base; where an instruction cannot be rewritten (or \p Base
/// escapes via a call/branch/CPSR-live def), a compensating SUBS is inserted
/// before it and the walk stops.
void ARMLoadStoreOpt::UpdateBaseRegUses(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI,
                                        const DebugLoc &DL, unsigned Base,
                                        unsigned WordOffset,
                                        ARMCC::CondCodes Pred,
                                        unsigned PredReg) {
  assert(isThumb1 && "Can only update base register uses for Thumb1!");
  // Start updating any instructions with immediate offsets. Insert a SUB before
  // the first non-updateable instruction (if any).
  for (; MBBI != MBB.end(); ++MBBI) {
    bool InsertSub = false;
    unsigned Opc = MBBI->getOpcode();

    if (MBBI->readsRegister(Base)) {
      int Offset;
      bool IsLoad =
        Opc == ARM::tLDRi || Opc == ARM::tLDRHi || Opc == ARM::tLDRBi;
      bool IsStore =
        Opc == ARM::tSTRi || Opc == ARM::tSTRHi || Opc == ARM::tSTRBi;

      if (IsLoad || IsStore) {
        // Loads and stores with immediate offsets can be updated, but only if
        // the new offset isn't negative.
        // The MachineOperand containing the offset immediate is the last one
        // before predicates.
        MachineOperand &MO =
          MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3);
        // The offsets are scaled by 1, 2 or 4 depending on the Opcode.
        Offset = MO.getImm() - WordOffset * getImmScale(Opc);

        // If storing the base register, it needs to be reset first.
        unsigned InstrSrcReg = getLoadStoreRegOp(*MBBI).getReg();

        if (Offset >= 0 && !(IsStore && InstrSrcReg == Base))
          MO.setImm(Offset);
        else
          InsertSub = true;
      } else if ((Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) &&
                 !definesCPSR(*MBBI)) {
        // SUBS/ADDS using this register, with a dead def of the CPSR.
        // Merge it with the update; if the merged offset is too large,
        // insert a new sub instead.
        MachineOperand &MO =
          MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3);
        Offset = (Opc == ARM::tSUBi8) ?
          MO.getImm() + WordOffset * 4 :
          MO.getImm() - WordOffset * 4 ;
        if (Offset >= 0 && TL->isLegalAddImmediate(Offset)) {
          // FIXME: Swap ADDS<->SUBS if Offset < 0, erase instruction if
          // Offset == 0.
          MO.setImm(Offset);
          // The base register has now been reset, so exit early.
          return;
        } else {
          InsertSub = true;
        }
      } else {
        // Can't update the instruction.
        InsertSub = true;
      }
    } else if (definesCPSR(*MBBI) || MBBI->isCall() || MBBI->isBranch()) {
      // Since SUBS sets the condition flags, we can't place the base reset
      // after an instruction that has a live CPSR def.
      // The base register might also contain an argument for a function call.
      InsertSub = true;
    }

    if (InsertSub) {
      // An instruction above couldn't be updated, so insert a sub.
      BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBi8), Base)
          .add(t1CondCodeOp(true))
          .addReg(Base)
          .addImm(WordOffset * 4)
          .addImm(Pred)
          .addReg(PredReg);
      return;
    }

    if (MBBI->killsRegister(Base) || MBBI->definesRegister(Base))
      // Register got killed. Stop updating.
      return;
  }

  // End of block was reached.
  if (MBB.succ_size() > 0) {
    // FIXME: Because of a bug, live registers are sometimes missing from
    // the successor blocks' live-in sets. This means we can't trust that
    // information and *always* have to reset at the end of a block.
    // See PR21029.
    if (MBBI != MBB.end()) --MBBI;
    BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBi8), Base)
        .add(t1CondCodeOp(true))
        .addReg(Base)
        .addImm(WordOffset * 4)
        .addImm(Pred)
        .addReg(PredReg);
  }
}
|
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
/// Return the first register of class \p RegClass that is not in \p Regs.
|
|
|
|
unsigned ARMLoadStoreOpt::findFreeReg(const TargetRegisterClass &RegClass) {
|
|
|
|
if (!RegClassInfoValid) {
|
|
|
|
RegClassInfo.runOnMachineFunction(*MF);
|
|
|
|
RegClassInfoValid = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (unsigned Reg : RegClassInfo.getOrder(&RegClass))
|
|
|
|
if (!LiveRegs.contains(Reg))
|
|
|
|
return Reg;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Compute live registers just before instruction \p Before (in normal schedule
|
|
|
|
/// direction). Computes backwards so multiple queries in the same block must
|
|
|
|
/// come in reverse order.
|
|
|
|
void ARMLoadStoreOpt::moveLiveRegsBefore(const MachineBasicBlock &MBB,
|
|
|
|
MachineBasicBlock::const_iterator Before) {
|
|
|
|
// Initialize if we never queried in this block.
|
|
|
|
if (!LiveRegsValid) {
|
2016-12-08 08:15:51 +08:00
|
|
|
LiveRegs.init(*TRI);
|
2016-05-03 08:24:32 +08:00
|
|
|
LiveRegs.addLiveOuts(MBB);
|
2015-07-11 02:08:49 +08:00
|
|
|
LiveRegPos = MBB.end();
|
|
|
|
LiveRegsValid = true;
|
|
|
|
}
|
|
|
|
// Move backward just before the "Before" position.
|
|
|
|
while (LiveRegPos != Before) {
|
|
|
|
--LiveRegPos;
|
|
|
|
LiveRegs.stepBackward(*LiveRegPos);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool ContainsReg(const ArrayRef<std::pair<unsigned, bool>> &Regs,
|
|
|
|
unsigned Reg) {
|
|
|
|
for (const std::pair<unsigned, bool> &R : Regs)
|
|
|
|
if (R.first == Reg)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// Create and insert a LDM or STM with Base as base register and registers in
|
|
|
|
/// Regs as the register operands that would be loaded / stored. It returns
|
|
|
|
/// true if the transformation is done.
|
2016-06-12 23:39:02 +08:00
|
|
|
MachineInstr *ARMLoadStoreOpt::CreateLoadStoreMulti(
|
|
|
|
MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
|
|
|
|
int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
|
|
|
|
ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
|
|
|
|
ArrayRef<std::pair<unsigned, bool>> Regs) {
|
2007-01-19 15:51:42 +08:00
|
|
|
unsigned NumRegs = Regs.size();
|
2015-07-11 02:08:49 +08:00
|
|
|
assert(NumRegs > 1);
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2014-09-17 00:25:07 +08:00
|
|
|
// For Thumb1 targets, it might be necessary to clobber the CPSR to merge.
|
|
|
|
// Compute liveness information for that register to make the decision.
|
|
|
|
bool SafeToClobberCPSR = !isThumb1 ||
|
2015-07-11 02:08:49 +08:00
|
|
|
(MBB.computeRegisterLiveness(TRI, ARM::CPSR, InsertBefore, 20) ==
|
2014-09-17 00:25:07 +08:00
|
|
|
MachineBasicBlock::LQR_Dead);
|
|
|
|
|
2014-09-25 00:35:50 +08:00
|
|
|
bool Writeback = isThumb1; // Thumb1 LDM/STM have base reg writeback.
|
|
|
|
|
|
|
|
// Exception: If the base register is in the input reglist, Thumb1 LDM is
|
|
|
|
// non-writeback.
|
|
|
|
// It's also not possible to merge an STR of the base register in Thumb1.
|
2017-03-01 07:32:55 +08:00
|
|
|
if (isThumb1 && ContainsReg(Regs, Base)) {
|
2015-07-11 02:08:49 +08:00
|
|
|
assert(Base != ARM::SP && "Thumb1 does not allow SP in register list");
|
2017-03-01 07:32:55 +08:00
|
|
|
if (Opcode == ARM::tLDRi)
|
2015-07-11 02:08:49 +08:00
|
|
|
Writeback = false;
|
2017-03-01 07:32:55 +08:00
|
|
|
else if (Opcode == ARM::tSTRi)
|
2015-07-11 02:08:49 +08:00
|
|
|
return nullptr;
|
|
|
|
}
|
2014-09-25 00:35:50 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
ARM_AM::AMSubMode Mode = ARM_AM::ia;
|
2014-05-16 22:14:30 +08:00
|
|
|
// VFP and Thumb2 do not support IB or DA modes. Thumb1 only supports IA.
|
2010-08-28 07:18:17 +08:00
|
|
|
bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
|
2014-05-16 22:14:30 +08:00
|
|
|
bool haveIBAndDA = isNotVFP && !isThumb2 && !isThumb1;
|
|
|
|
|
2014-05-16 22:08:46 +08:00
|
|
|
if (Offset == 4 && haveIBAndDA) {
|
2007-01-19 15:51:42 +08:00
|
|
|
Mode = ARM_AM::ib;
|
2014-05-16 22:08:46 +08:00
|
|
|
} else if (Offset == -4 * (int)NumRegs + 4 && haveIBAndDA) {
|
2007-01-19 15:51:42 +08:00
|
|
|
Mode = ARM_AM::da;
|
2014-05-16 22:14:30 +08:00
|
|
|
} else if (Offset == -4 * (int)NumRegs && isNotVFP && !isThumb1) {
|
2010-08-28 07:57:52 +08:00
|
|
|
// VLDM/VSTM do not support DB mode without also updating the base reg.
|
2007-01-19 15:51:42 +08:00
|
|
|
Mode = ARM_AM::db;
|
2015-02-25 22:41:06 +08:00
|
|
|
} else if (Offset != 0 || Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
|
2014-05-16 22:08:46 +08:00
|
|
|
// Check if this is a supported opcode before inserting instructions to
|
2011-03-30 04:27:38 +08:00
|
|
|
// calculate a new base register.
|
2015-07-11 02:08:49 +08:00
|
|
|
if (!getLoadStoreMultipleOpcode(Opcode, Mode)) return nullptr;
|
2011-03-30 04:27:38 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
// If starting offset isn't zero, insert a MI to materialize a new base.
|
|
|
|
// But only do so if it is cost effective, i.e. merging more than two
|
|
|
|
// loads / stores.
|
|
|
|
if (NumRegs <= 2)
|
2015-07-11 02:08:49 +08:00
|
|
|
return nullptr;
|
2007-01-19 15:51:42 +08:00
|
|
|
|
2014-09-17 00:25:07 +08:00
|
|
|
// On Thumb1, it's not worth materializing a new base register without
|
|
|
|
// clobbering the CPSR (i.e. not using ADDS/SUBS).
|
|
|
|
if (!SafeToClobberCPSR)
|
2015-07-11 02:08:49 +08:00
|
|
|
return nullptr;
|
2014-09-17 00:25:07 +08:00
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
unsigned NewBase;
|
2014-05-16 22:08:46 +08:00
|
|
|
if (isi32Load(Opcode)) {
|
2015-10-01 19:56:19 +08:00
|
|
|
// If it is a load, then just use one of the destination registers
|
|
|
|
// as the new base. Will no longer be writeback in Thumb1.
|
2007-03-07 05:59:20 +08:00
|
|
|
NewBase = Regs[NumRegs-1].first;
|
2015-10-01 19:56:19 +08:00
|
|
|
Writeback = false;
|
2014-05-16 22:08:46 +08:00
|
|
|
} else {
|
2015-07-11 02:08:49 +08:00
|
|
|
// Find a free register that we can use as scratch register.
|
|
|
|
moveLiveRegsBefore(MBB, InsertBefore);
|
|
|
|
// The merged instruction does not exist yet but will use several Regs if
|
|
|
|
// it is a Store.
|
|
|
|
if (!isLoadSingle(Opcode))
|
|
|
|
for (const std::pair<unsigned, bool> &R : Regs)
|
|
|
|
LiveRegs.addReg(R.first);
|
|
|
|
|
|
|
|
NewBase = findFreeReg(isThumb1 ? ARM::tGPRRegClass : ARM::GPRRegClass);
|
2007-03-07 05:59:20 +08:00
|
|
|
if (NewBase == 0)
|
2015-07-11 02:08:49 +08:00
|
|
|
return nullptr;
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
2014-05-16 22:14:30 +08:00
|
|
|
|
|
|
|
int BaseOpc =
|
|
|
|
isThumb2 ? ARM::t2ADDri :
|
2015-02-25 22:41:06 +08:00
|
|
|
(isThumb1 && Base == ARM::SP) ? ARM::tADDrSPi :
|
2014-08-22 01:11:03 +08:00
|
|
|
(isThumb1 && Offset < 8) ? ARM::tADDi3 :
|
2014-05-16 22:14:30 +08:00
|
|
|
isThumb1 ? ARM::tADDi8 : ARM::ADDri;
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
if (Offset < 0) {
|
2014-08-22 01:11:03 +08:00
|
|
|
Offset = - Offset;
|
2014-05-16 22:14:30 +08:00
|
|
|
BaseOpc =
|
|
|
|
isThumb2 ? ARM::t2SUBri :
|
2015-02-25 22:41:06 +08:00
|
|
|
(isThumb1 && Offset < 8 && Base != ARM::SP) ? ARM::tSUBi3 :
|
2014-05-16 22:14:30 +08:00
|
|
|
isThumb1 ? ARM::tSUBi8 : ARM::SUBri;
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
2014-05-16 22:14:30 +08:00
|
|
|
|
|
|
|
if (!TL->isLegalAddImmediate(Offset))
|
|
|
|
// FIXME: Try add with register operand?
|
2015-07-11 02:08:49 +08:00
|
|
|
return nullptr; // Probably not worth it then.
|
|
|
|
|
|
|
|
// We can only append a kill flag to the add/sub input if the value is not
|
|
|
|
// used in the register list of the stm as well.
|
|
|
|
bool KillOldBase = BaseKill &&
|
|
|
|
(!isi32Store(Opcode) || !ContainsReg(Regs, Base));
|
2014-05-16 22:14:30 +08:00
|
|
|
|
|
|
|
if (isThumb1) {
|
2014-08-22 01:11:03 +08:00
|
|
|
// Thumb1: depending on immediate size, use either
|
2014-09-17 00:25:07 +08:00
|
|
|
// ADDS NewBase, Base, #imm3
|
2014-08-22 01:11:03 +08:00
|
|
|
// or
|
2014-09-17 00:25:07 +08:00
|
|
|
// MOV NewBase, Base
|
|
|
|
// ADDS NewBase, #imm8.
|
2015-02-25 22:41:06 +08:00
|
|
|
if (Base != NewBase &&
|
|
|
|
(BaseOpc == ARM::tADDi8 || BaseOpc == ARM::tSUBi8)) {
|
2014-05-16 22:14:30 +08:00
|
|
|
// Need to insert a MOV to the new base first.
|
2015-01-22 06:39:43 +08:00
|
|
|
if (isARMLowRegister(NewBase) && isARMLowRegister(Base) &&
|
2015-01-29 08:19:33 +08:00
|
|
|
!STI->hasV6Ops()) {
|
2015-01-22 06:39:43 +08:00
|
|
|
// thumbv4t doesn't have lo->lo copies, and we can't predicate tMOVSr
|
|
|
|
if (Pred != ARMCC::AL)
|
2015-07-11 02:08:49 +08:00
|
|
|
return nullptr;
|
|
|
|
BuildMI(MBB, InsertBefore, DL, TII->get(ARM::tMOVSr), NewBase)
|
|
|
|
.addReg(Base, getKillRegState(KillOldBase));
|
2015-01-22 06:39:43 +08:00
|
|
|
} else
|
2015-07-11 02:08:49 +08:00
|
|
|
BuildMI(MBB, InsertBefore, DL, TII->get(ARM::tMOVr), NewBase)
|
2017-01-20 16:15:24 +08:00
|
|
|
.addReg(Base, getKillRegState(KillOldBase))
|
|
|
|
.add(predOps(Pred, PredReg));
|
2015-01-22 06:39:43 +08:00
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
// The following ADDS/SUBS becomes an update.
|
2014-08-22 01:11:03 +08:00
|
|
|
Base = NewBase;
|
2015-07-11 02:08:49 +08:00
|
|
|
KillOldBase = true;
|
2014-05-16 22:14:30 +08:00
|
|
|
}
|
2015-02-25 22:41:06 +08:00
|
|
|
if (BaseOpc == ARM::tADDrSPi) {
|
|
|
|
assert(Offset % 4 == 0 && "tADDrSPi offset is scaled by 4");
|
2015-07-11 02:08:49 +08:00
|
|
|
BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase)
|
2017-01-20 16:15:24 +08:00
|
|
|
.addReg(Base, getKillRegState(KillOldBase))
|
|
|
|
.addImm(Offset / 4)
|
|
|
|
.add(predOps(Pred, PredReg));
|
2015-02-25 22:41:06 +08:00
|
|
|
} else
|
2017-01-13 18:37:37 +08:00
|
|
|
BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase)
|
|
|
|
.add(t1CondCodeOp(true))
|
|
|
|
.addReg(Base, getKillRegState(KillOldBase))
|
|
|
|
.addImm(Offset)
|
2017-01-20 16:15:24 +08:00
|
|
|
.add(predOps(Pred, PredReg));
|
2014-05-16 22:14:30 +08:00
|
|
|
} else {
|
2015-07-11 02:08:49 +08:00
|
|
|
BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase)
|
2017-01-20 16:15:24 +08:00
|
|
|
.addReg(Base, getKillRegState(KillOldBase))
|
|
|
|
.addImm(Offset)
|
|
|
|
.add(predOps(Pred, PredReg))
|
|
|
|
.add(condCodeOp());
|
2014-05-16 22:14:30 +08:00
|
|
|
}
|
2007-01-19 15:51:42 +08:00
|
|
|
Base = NewBase;
|
2014-05-16 22:08:46 +08:00
|
|
|
BaseKill = true; // New base is always killed straight away.
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
bool isDef = isLoadSingle(Opcode);
|
2014-05-16 22:14:30 +08:00
|
|
|
|
|
|
|
// Get LS multiple opcode. Note that for Thumb1 this might be an opcode with
|
|
|
|
// base register writeback.
|
2010-11-16 09:16:36 +08:00
|
|
|
Opcode = getLoadStoreMultipleOpcode(Opcode, Mode);
|
2015-07-11 02:08:49 +08:00
|
|
|
if (!Opcode)
|
|
|
|
return nullptr;
|
2014-05-16 22:14:30 +08:00
|
|
|
|
2014-09-25 00:35:50 +08:00
|
|
|
// Check if a Thumb1 LDM/STM merge is safe. This is the case if:
|
|
|
|
// - There is no writeback (LDM of base register),
|
|
|
|
// - the base register is killed by the merged instruction,
|
|
|
|
// - or it's safe to overwrite the condition flags, i.e. to insert a SUBS
|
|
|
|
// to reset the base register.
|
|
|
|
// Otherwise, don't merge.
|
|
|
|
// It's safe to return here since the code to materialize a new base register
|
|
|
|
// above is also conditional on SafeToClobberCPSR.
|
|
|
|
if (isThumb1 && !SafeToClobberCPSR && Writeback && !BaseKill)
|
2015-07-11 02:08:49 +08:00
|
|
|
return nullptr;
|
2014-08-16 01:00:30 +08:00
|
|
|
|
2014-05-16 22:14:30 +08:00
|
|
|
MachineInstrBuilder MIB;
|
|
|
|
|
|
|
|
if (Writeback) {
|
2015-10-01 19:56:19 +08:00
|
|
|
assert(isThumb1 && "expected Writeback only inThumb1");
|
|
|
|
if (Opcode == ARM::tLDMIA) {
|
|
|
|
assert(!(ContainsReg(Regs, Base)) && "Thumb1 can't LDM ! with Base in Regs");
|
2014-05-16 22:14:30 +08:00
|
|
|
// Update tLDMIA with writeback if necessary.
|
|
|
|
Opcode = ARM::tLDMIA_UPD;
|
2015-10-01 19:56:19 +08:00
|
|
|
}
|
2014-05-16 22:14:30 +08:00
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
MIB = BuildMI(MBB, InsertBefore, DL, TII->get(Opcode));
|
2014-05-16 22:14:30 +08:00
|
|
|
|
|
|
|
// Thumb1: we might need to set base writeback when building the MI.
|
|
|
|
MIB.addReg(Base, getDefRegState(true))
|
|
|
|
.addReg(Base, getKillRegState(BaseKill));
|
2014-09-25 00:35:50 +08:00
|
|
|
|
|
|
|
// The base isn't dead after a merged instruction with writeback.
|
|
|
|
// Insert a sub instruction after the newly formed instruction to reset.
|
|
|
|
if (!BaseKill)
|
2015-07-11 02:08:49 +08:00
|
|
|
UpdateBaseRegUses(MBB, InsertBefore, DL, Base, NumRegs, Pred, PredReg);
|
2014-05-16 22:14:30 +08:00
|
|
|
} else {
|
|
|
|
// No writeback, simply build the MachineInstr.
|
2015-07-11 02:08:49 +08:00
|
|
|
MIB = BuildMI(MBB, InsertBefore, DL, TII->get(Opcode));
|
2014-05-16 22:14:30 +08:00
|
|
|
MIB.addReg(Base, getKillRegState(BaseKill));
|
|
|
|
}
|
|
|
|
|
|
|
|
MIB.addImm(Pred).addReg(PredReg);
|
|
|
|
|
2015-05-27 13:12:40 +08:00
|
|
|
for (const std::pair<unsigned, bool> &R : Regs)
|
2015-07-11 02:08:49 +08:00
|
|
|
MIB.addReg(R.first, getDefRegState(isDef) | getKillRegState(R.second));
|
ARM: correct liveness flags during ARMLoadStoreOpt
When we had a sequence like:
s1 = VLDRS [r0, 1], Q0<imp-def>
s3 = VLDRS [r0, 2], Q0<imp-use,kill>, Q0<imp-def>
s0 = VLDRS [r0, 0], Q0<imp-use,kill>, Q0<imp-def>
s2 = VLDRS [r0, 4], Q0<imp-use,kill>, Q0<imp-def>
we were gathering the {s0, s1} loads below the s3 load. This is fine,
but confused the verifier since now the s3 load had Q0<imp-use> with
no definition above it.
This should mark such uses <undef> as well. The liveness structure at
the beginning and end of the block is unaffected, and the true sN
definitions should prevent any dodgy reorderings being introduced
elsewhere.
rdar://problem/15124449
llvm-svn: 192344
2013-10-10 17:28:20 +08:00
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
return MIB.getInstr();
|
ARM: correct liveness flags during ARMLoadStoreOpt
When we had a sequence like:
s1 = VLDRS [r0, 1], Q0<imp-def>
s3 = VLDRS [r0, 2], Q0<imp-use,kill>, Q0<imp-def>
s0 = VLDRS [r0, 0], Q0<imp-use,kill>, Q0<imp-def>
s2 = VLDRS [r0, 4], Q0<imp-use,kill>, Q0<imp-def>
we were gathering the {s0, s1} loads below the s3 load. This is fine,
but confused the verifier since now the s3 load had Q0<imp-use> with
no definition above it.
This should mark such uses <undef> as well. The liveness structure at
the beginning and end of the block is unaffected, and the true sN
definitions should prevent any dodgy reorderings being introduced
elsewhere.
rdar://problem/15124449
llvm-svn: 192344
2013-10-10 17:28:20 +08:00
|
|
|
}
|
|
|
|
|
2016-06-12 23:39:02 +08:00
|
|
|
MachineInstr *ARMLoadStoreOpt::CreateLoadStoreDouble(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
    int Offset, unsigned Base, bool BaseKill, unsigned Opcode,
    ARMCC::CondCodes Pred, unsigned PredReg, const DebugLoc &DL,
    ArrayRef<std::pair<unsigned, bool>> Regs) const {
  // Emit a single t2LDRD/t2STRD covering the two registers in Regs
  // (Regs[i] is a <register, isKill> pair), accessing [Base + Offset].
  const bool IsLd = isi32Load(Opcode);
  assert((IsLd || isi32Store(Opcode)) && "Must have integer load or store");
  assert(Regs.size() == 2);

  const unsigned PairOpc = IsLd ? ARM::t2LDRDi8 : ARM::t2STRDi8;
  MachineInstrBuilder Builder =
      BuildMI(MBB, InsertBefore, DL, TII->get(PairOpc));
  // Loads define both registers; stores read them, propagating kill flags.
  for (const std::pair<unsigned, bool> &R : Regs) {
    if (IsLd)
      Builder.addReg(R.first, RegState::Define);
    else
      Builder.addReg(R.first, getKillRegState(R.second));
  }
  Builder.addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
  return Builder.getInstr();
}
|
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// Call MergeOps and update MemOps and merges accordingly on success.
|
2015-07-11 02:08:49 +08:00
|
|
|
MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(const MergeCandidate &Cand) {
|
|
|
|
const MachineInstr *First = Cand.Instrs.front();
|
|
|
|
unsigned Opcode = First->getOpcode();
|
|
|
|
bool IsLoad = isLoadSingle(Opcode);
|
|
|
|
SmallVector<std::pair<unsigned, bool>, 8> Regs;
|
|
|
|
SmallVector<unsigned, 4> ImpDefs;
|
|
|
|
DenseSet<unsigned> KilledRegs;
|
Clear kill flags in ARMLoadStoreOptimizer.
The pass here was clearing kill flags on instructions which had
their sources killed in the instruction being combined. But
given that the new instruction is inserted after the existing ones,
any existing instructions with kill flags will lead to the verifier
complaining that we are reading an undefined physreg.
For example, what we had prior to this optimization is
t2STRi12 %R1, %SP, 12
t2STRi12 %R1<kill>, %SP, 16
t2STRi12 %R0<kill>, %SP, 8
and prior to this fix that would generate
t2STRi12 %R1<kill>, %SP, 16
t2STRDi8 %R0<kill>, %R1, %SP, 8
This is clearly incorrect as it didn't clear the kill flag on R1
used with offset 16 because there was no kill flag on the instruction
with offset 12.
After this change we clear the kill flag on the offset 16 instruction
because we know it will be used afterwards in the new instruction.
I haven't provided a test case. I have a small test, but even it is
very sensitive to register allocation order which isn't ideal.
llvm-svn: 242359
2015-07-16 08:09:18 +08:00
|
|
|
DenseSet<unsigned> UsedRegs;
|
2015-07-11 02:08:49 +08:00
|
|
|
// Determine list of registers and list of implicit super-register defs.
|
|
|
|
for (const MachineInstr *MI : Cand.Instrs) {
|
|
|
|
const MachineOperand &MO = getLoadStoreRegOp(*MI);
|
|
|
|
unsigned Reg = MO.getReg();
|
|
|
|
bool IsKill = MO.isKill();
|
|
|
|
if (IsKill)
|
2010-06-22 05:21:14 +08:00
|
|
|
KilledRegs.insert(Reg);
|
2015-07-11 02:08:49 +08:00
|
|
|
Regs.push_back(std::make_pair(Reg, IsKill));
|
Clear kill flags in ARMLoadStoreOptimizer.
The pass here was clearing kill flags on instructions which had
their sources killed in the instruction being combined. But
given that the new instruction is inserted after the existing ones,
any existing instructions with kill flags will lead to the verifier
complaining that we are reading an undefined physreg.
For example, what we had prior to this optimization is
t2STRi12 %R1, %SP, 12
t2STRi12 %R1<kill>, %SP, 16
t2STRi12 %R0<kill>, %SP, 8
and prior to this fix that would generate
t2STRi12 %R1<kill>, %SP, 16
t2STRDi8 %R0<kill>, %R1, %SP, 8
This is clearly incorrect as it didn't clear the kill flag on R1
used with offset 16 because there was no kill flag on the instruction
with offset 12.
After this change we clear the kill flag on the offset 16 instruction
because we know it will be used afterwards in the new instruction.
I haven't provided a test case. I have a small test, but even it is
very sensitive to register allocation order which isn't ideal.
llvm-svn: 242359
2015-07-16 08:09:18 +08:00
|
|
|
UsedRegs.insert(Reg);
|
2015-07-11 02:08:49 +08:00
|
|
|
|
|
|
|
if (IsLoad) {
|
|
|
|
// Collect any implicit defs of super-registers, after merging we can't
|
|
|
|
// be sure anymore that we properly preserved these live ranges and must
|
|
|
|
// removed these implicit operands.
|
|
|
|
for (const MachineOperand &MO : MI->implicit_operands()) {
|
|
|
|
if (!MO.isReg() || !MO.isDef() || MO.isDead())
|
|
|
|
continue;
|
|
|
|
assert(MO.isImplicit());
|
|
|
|
unsigned DefReg = MO.getReg();
|
|
|
|
|
2016-08-12 06:21:41 +08:00
|
|
|
if (is_contained(ImpDefs, DefReg))
|
2015-07-11 02:08:49 +08:00
|
|
|
continue;
|
|
|
|
// We can ignore cases where the super-reg is read and written.
|
|
|
|
if (MI->readsRegister(DefReg))
|
|
|
|
continue;
|
|
|
|
ImpDefs.push_back(DefReg);
|
|
|
|
}
|
2010-06-22 05:21:14 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
// Attempt the merge.
|
2017-09-21 05:35:51 +08:00
|
|
|
using iterator = MachineBasicBlock::iterator;
|
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
MachineInstr *LatestMI = Cand.Instrs[Cand.LatestMIIdx];
|
|
|
|
iterator InsertBefore = std::next(iterator(LatestMI));
|
|
|
|
MachineBasicBlock &MBB = *LatestMI->getParent();
|
2016-07-09 04:21:17 +08:00
|
|
|
unsigned Offset = getMemoryOpOffset(*First);
|
2015-07-11 02:08:49 +08:00
|
|
|
unsigned Base = getLoadStoreBaseOp(*First).getReg();
|
|
|
|
bool BaseKill = LatestMI->killsRegister(Base);
|
|
|
|
unsigned PredReg = 0;
|
2016-02-23 10:46:52 +08:00
|
|
|
ARMCC::CondCodes Pred = getInstrPredicate(*First, PredReg);
|
2015-07-11 02:08:49 +08:00
|
|
|
DebugLoc DL = First->getDebugLoc();
|
2015-07-21 08:18:59 +08:00
|
|
|
MachineInstr *Merged = nullptr;
|
|
|
|
if (Cand.CanMergeToLSDouble)
|
|
|
|
Merged = CreateLoadStoreDouble(MBB, InsertBefore, Offset, Base, BaseKill,
|
|
|
|
Opcode, Pred, PredReg, DL, Regs);
|
|
|
|
if (!Merged && Cand.CanMergeToLSMulti)
|
|
|
|
Merged = CreateLoadStoreMulti(MBB, InsertBefore, Offset, Base, BaseKill,
|
2015-07-11 02:08:49 +08:00
|
|
|
Opcode, Pred, PredReg, DL, Regs);
|
|
|
|
if (!Merged)
|
|
|
|
return nullptr;
|
|
|
|
|
|
|
|
// Determine earliest instruction that will get removed. We then keep an
|
|
|
|
// iterator just above it so the following erases don't invalidated it.
|
|
|
|
iterator EarliestI(Cand.Instrs[Cand.EarliestMIIdx]);
|
|
|
|
bool EarliestAtBegin = false;
|
|
|
|
if (EarliestI == MBB.begin()) {
|
|
|
|
EarliestAtBegin = true;
|
|
|
|
} else {
|
|
|
|
EarliestI = std::prev(EarliestI);
|
2015-06-30 05:42:16 +08:00
|
|
|
}
|
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
// Remove instructions which have been merged.
|
|
|
|
for (MachineInstr *MI : Cand.Instrs)
|
|
|
|
MBB.erase(MI);
|
|
|
|
|
|
|
|
// Determine range between the earliest removed instruction and the new one.
|
|
|
|
if (EarliestAtBegin)
|
|
|
|
EarliestI = MBB.begin();
|
|
|
|
else
|
|
|
|
EarliestI = std::next(EarliestI);
|
|
|
|
auto FixupRange = make_range(EarliestI, iterator(Merged));
|
|
|
|
|
|
|
|
if (isLoadSingle(Opcode)) {
|
|
|
|
// If the previous loads defined a super-reg, then we have to mark earlier
|
|
|
|
// operands undef; Replicate the super-reg def on the merged instruction.
|
|
|
|
for (MachineInstr &MI : FixupRange) {
|
|
|
|
for (unsigned &ImpDefReg : ImpDefs) {
|
|
|
|
for (MachineOperand &MO : MI.implicit_operands()) {
|
|
|
|
if (!MO.isReg() || MO.getReg() != ImpDefReg)
|
|
|
|
continue;
|
|
|
|
if (MO.readsReg())
|
|
|
|
MO.setIsUndef();
|
|
|
|
else if (MO.isDef())
|
|
|
|
ImpDefReg = 0;
|
|
|
|
}
|
|
|
|
}
|
2012-03-29 06:50:56 +08:00
|
|
|
}
|
2009-12-24 05:28:31 +08:00
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
MachineInstrBuilder MIB(*Merged->getParent()->getParent(), Merged);
|
|
|
|
for (unsigned ImpDef : ImpDefs)
|
|
|
|
MIB.addReg(ImpDef, RegState::ImplicitDefine);
|
|
|
|
} else {
|
|
|
|
// Remove kill flags: We are possibly storing the values later now.
|
|
|
|
assert(isi32Store(Opcode) || Opcode == ARM::VSTRS || Opcode == ARM::VSTRD);
|
|
|
|
for (MachineInstr &MI : FixupRange) {
|
|
|
|
for (MachineOperand &MO : MI.uses()) {
|
|
|
|
if (!MO.isReg() || !MO.isKill())
|
|
|
|
continue;
|
Clear kill flags in ARMLoadStoreOptimizer.
The pass here was clearing kill flags on instructions which had
their sources killed in the instruction being combined. But
given that the new instruction is inserted after the existing ones,
any existing instructions with kill flags will lead to the verifier
complaining that we are reading an undefined physreg.
For example, what we had prior to this optimization is
t2STRi12 %R1, %SP, 12
t2STRi12 %R1<kill>, %SP, 16
t2STRi12 %R0<kill>, %SP, 8
and prior to this fix that would generate
t2STRi12 %R1<kill>, %SP, 16
t2STRDi8 %R0<kill>, %R1, %SP, 8
This is clearly incorrect as it didn't clear the kill flag on R1
used with offset 16 because there was no kill flag on the instruction
with offset 12.
After this change we clear the kill flag on the offset 16 instruction
because we know it will be used afterwards in the new instruction.
I haven't provided a test case. I have a small test, but even it is
very sensitive to register allocation order which isn't ideal.
llvm-svn: 242359
2015-07-16 08:09:18 +08:00
|
|
|
if (UsedRegs.count(MO.getReg()))
|
2015-07-11 02:08:49 +08:00
|
|
|
MO.setIsKill(false);
|
2010-06-22 05:21:14 +08:00
|
|
|
}
|
|
|
|
}
|
2015-07-11 02:08:49 +08:00
|
|
|
assert(ImpDefs.empty());
|
2009-12-24 05:28:23 +08:00
|
|
|
}
|
2014-09-25 00:35:50 +08:00
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
return Merged;
|
2009-12-24 05:28:23 +08:00
|
|
|
}
|
|
|
|
|
2015-07-21 08:18:59 +08:00
|
|
|
/// Return true if \p Offset is encodable as the immediate of a t2LDRDi8 or
/// t2STRDi8: a multiple of 4 whose magnitude fits the 8-bit immediate that
/// the hardware internally scales by 4 (i.e. magnitude <= 1020).
static bool isValidLSDoubleOffset(int Offset) {
  const unsigned Magnitude = abs(Offset);
  if (Magnitude % 4 != 0)
    return false;
  return Magnitude < 1024;
}
|
|
|
|
|
2016-03-03 03:20:00 +08:00
|
|
|
/// Return true for loads/stores that can be combined to a double/multi
|
|
|
|
/// operation without increasing the requirements for alignment.
|
|
|
|
static bool mayCombineMisaligned(const TargetSubtargetInfo &STI,
|
|
|
|
const MachineInstr &MI) {
|
|
|
|
// vldr/vstr trap on misaligned pointers anyway, forming vldm makes no
|
|
|
|
// difference.
|
|
|
|
unsigned Opcode = MI.getOpcode();
|
|
|
|
if (!isi32Load(Opcode) && !isi32Store(Opcode))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Stack pointer alignment is out of the programmers control so we can trust
|
|
|
|
// SP-relative loads/stores.
|
|
|
|
if (getLoadStoreBaseOp(MI).getReg() == ARM::SP &&
|
|
|
|
STI.getFrameLowering()->getTransientStackAlignment() >= 4)
|
|
|
|
return true;
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2015-07-11 02:08:49 +08:00
|
|
|
/// Find candidates for load/store multiple merge in list of
/// MemOpQueueEntries. \p MemOps holds same-base, same-predicate memory ops
/// sorted by offset; each maximal mergeable run becomes one MergeCandidate
/// appended to Candidates, annotated with whether it may form an LDM/STM
/// (LSMulti) and/or an LDRD/STRD (LSDouble).
void ARMLoadStoreOpt::FormCandidates(const MemOpQueue &MemOps) {
  const MachineInstr *FirstMI = MemOps[0].MI;
  unsigned Opcode = FirstMI->getOpcode();
  bool isNotVFP = isi32Load(Opcode) || isi32Store(Opcode);
  // Bytes transferred per instruction; chains must advance by exactly this.
  unsigned Size = getLSMultipleTransferSize(FirstMI);

  unsigned SIndex = 0;
  unsigned EIndex = MemOps.size();
  do {
    // Look at the first instruction.
    const MachineInstr *MI = MemOps[SIndex].MI;
    int Offset = MemOps[SIndex].Offset;
    const MachineOperand &PMO = getLoadStoreRegOp(*MI);
    unsigned PReg = PMO.getReg();
    // Undef operands get a sentinel encoding so the ascending-order check
    // below can never extend the chain past them.
    unsigned PRegNum = PMO.isUndef() ? std::numeric_limits<unsigned>::max()
                                     : TRI->getEncodingValue(PReg);
    unsigned Latest = SIndex;
    unsigned Earliest = SIndex;
    unsigned Count = 1;
    // LDRD/STRD are Thumb2-only integer ops with a limited offset encoding.
    bool CanMergeToLSDouble =
      STI->isThumb2() && isNotVFP && isValidLSDoubleOffset(Offset);
    // ARM errata 602117: LDRD with base in list may result in incorrect base
    // register when interrupted or faulted.
    if (STI->isCortexM3() && isi32Load(Opcode) &&
        PReg == getLoadStoreBaseOp(*MI).getReg())
      CanMergeToLSDouble = false;

    bool CanMergeToLSMulti = true;
    // On Swift, avoid a vldm/vstm that starts with an odd register number,
    // as that needs more uops than single vldrs.
    if (STI->hasSlowOddRegister() && !isNotVFP && (PRegNum % 2) == 1)
      CanMergeToLSMulti = false;

    // LDRD/STRD do not allow SP/PC. LDM/STM do not support it or have it
    // deprecated; LDM to PC is fine but cannot happen here.
    if (PReg == ARM::SP || PReg == ARM::PC)
      CanMergeToLSMulti = CanMergeToLSDouble = false;

    // Should we be conservative?
    if (AssumeMisalignedLoadStores && !mayCombineMisaligned(*STI, *MI))
      CanMergeToLSMulti = CanMergeToLSDouble = false;

    // vldm / vstm limit are 32 for S variants, 16 for D variants.
    unsigned Limit;
    switch (Opcode) {
    default:
      Limit = UINT_MAX;
      break;
    case ARM::VLDRD:
    case ARM::VSTRD:
      Limit = 16;
      break;
    }

    // Merge following instructions where possible.
    for (unsigned I = SIndex+1; I < EIndex; ++I, ++Count) {
      // The next op must continue the run contiguously in memory.
      int NewOffset = MemOps[I].Offset;
      if (NewOffset != Offset + (int)Size)
        break;
      const MachineOperand &MO = getLoadStoreRegOp(*MemOps[I].MI);
      unsigned Reg = MO.getReg();
      if (Reg == ARM::SP || Reg == ARM::PC)
        break;
      if (Count == Limit)
        break;

      // See if the current load/store may be part of a multi load/store.
      unsigned RegNum = MO.isUndef() ? std::numeric_limits<unsigned>::max()
                                     : TRI->getEncodingValue(Reg);
      bool PartOfLSMulti = CanMergeToLSMulti;
      if (PartOfLSMulti) {
        // Register numbers must be in ascending order.
        if (RegNum <= PRegNum)
          PartOfLSMulti = false;
        // For VFP / NEON load/store multiples, the registers must be
        // consecutive and within the limit on the number of registers per
        // instruction.
        else if (!isNotVFP && RegNum != PRegNum+1)
          PartOfLSMulti = false;
      }
      // See if the current load/store may be part of a double load/store.
      // LDRD/STRD transfer exactly two registers, so only the second op in
      // the run can still join.
      bool PartOfLSDouble = CanMergeToLSDouble && Count <= 1;

      if (!PartOfLSMulti && !PartOfLSDouble)
        break;
      CanMergeToLSMulti &= PartOfLSMulti;
      CanMergeToLSDouble &= PartOfLSDouble;
      // Track MemOp with latest and earliest position (Positions are
      // counted in reverse).
      unsigned Position = MemOps[I].Position;
      if (Position < MemOps[Latest].Position)
        Latest = I;
      else if (Position > MemOps[Earliest].Position)
        Earliest = I;
      // Prepare for next MemOp.
      Offset += Size;
      PRegNum = RegNum;
    }

    // Form a candidate from the Ops collected so far.
    MergeCandidate *Candidate = new(Allocator.Allocate()) MergeCandidate;
    for (unsigned C = SIndex, CE = SIndex + Count; C < CE; ++C)
      Candidate->Instrs.push_back(MemOps[C].MI);
    Candidate->LatestMIIdx = Latest - SIndex;
    Candidate->EarliestMIIdx = Earliest - SIndex;
    Candidate->InsertPos = MemOps[Latest].Position;
    // A single instruction is not an actual merge; record it anyway so the
    // caller sees full coverage of MemOps.
    if (Count == 1)
      CanMergeToLSMulti = CanMergeToLSDouble = false;
    Candidate->CanMergeToLSMulti = CanMergeToLSMulti;
    Candidate->CanMergeToLSDouble = CanMergeToLSDouble;
    Candidates.push_back(Candidate);
    // Continue after the chain.
    SIndex += Count;
  } while (SIndex < EIndex);
}
|
|
|
|
|
2010-11-16 09:16:36 +08:00
|
|
|
/// Return the base-register-updating (writeback) variant of the given
/// load/store-multiple opcode for addressing sub-mode \p Mode, e.g.
/// LDMIA + ia -> LDMIA_UPD. Thumb2 and VFP multiples only exist in ia/db
/// forms; any unlisted opcode/sub-mode combination is llvm_unreachable.
static unsigned getUpdatingLSMultipleOpcode(unsigned Opc,
                                            ARM_AM::AMSubMode Mode) {
  switch (Opc) {
  default: llvm_unreachable("Unhandled opcode!");
  case ARM::LDMIA:
  case ARM::LDMDA:
  case ARM::LDMDB:
  case ARM::LDMIB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::LDMIA_UPD;
    case ARM_AM::ib: return ARM::LDMIB_UPD;
    case ARM_AM::da: return ARM::LDMDA_UPD;
    case ARM_AM::db: return ARM::LDMDB_UPD;
    }
  case ARM::STMIA:
  case ARM::STMDA:
  case ARM::STMDB:
  case ARM::STMIB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::STMIA_UPD;
    case ARM_AM::ib: return ARM::STMIB_UPD;
    case ARM_AM::da: return ARM::STMDA_UPD;
    case ARM_AM::db: return ARM::STMDB_UPD;
    }
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2LDMIA_UPD;
    case ARM_AM::db: return ARM::t2LDMDB_UPD;
    }
  case ARM::t2STMIA:
  case ARM::t2STMDB:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::t2STMIA_UPD;
    case ARM_AM::db: return ARM::t2STMDB_UPD;
    }
  case ARM::VLDMSIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMSIA_UPD;
    case ARM_AM::db: return ARM::VLDMSDB_UPD;
    }
  case ARM::VLDMDIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VLDMDIA_UPD;
    case ARM_AM::db: return ARM::VLDMDDB_UPD;
    }
  case ARM::VSTMSIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMSIA_UPD;
    case ARM_AM::db: return ARM::VSTMSDB_UPD;
    }
  case ARM::VSTMDIA:
    switch (Mode) {
    default: llvm_unreachable("Unhandled submode!");
    case ARM_AM::ia: return ARM::VSTMDIA_UPD;
    case ARM_AM::db: return ARM::VSTMDDB_UPD;
    }
  }
}
|
|
|
|
|
2015-07-21 08:19:01 +08:00
|
|
|
/// Check if the given instruction increments or decrements a register and
|
|
|
|
/// return the amount it is incremented/decremented. Returns 0 if the CPSR flags
|
|
|
|
/// generated by the instruction are possibly read as well.
|
|
|
|
static int isIncrementOrDecrement(const MachineInstr &MI, unsigned Reg,
|
|
|
|
ARMCC::CondCodes Pred, unsigned PredReg) {
|
|
|
|
bool CheckCPSRDef;
|
|
|
|
int Scale;
|
|
|
|
switch (MI.getOpcode()) {
|
|
|
|
case ARM::tADDi8: Scale = 4; CheckCPSRDef = true; break;
|
|
|
|
case ARM::tSUBi8: Scale = -4; CheckCPSRDef = true; break;
|
|
|
|
case ARM::t2SUBri:
|
|
|
|
case ARM::SUBri: Scale = -1; CheckCPSRDef = true; break;
|
|
|
|
case ARM::t2ADDri:
|
|
|
|
case ARM::ADDri: Scale = 1; CheckCPSRDef = true; break;
|
|
|
|
case ARM::tADDspi: Scale = 4; CheckCPSRDef = false; break;
|
|
|
|
case ARM::tSUBspi: Scale = -4; CheckCPSRDef = false; break;
|
|
|
|
default: return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned MIPredReg;
|
|
|
|
if (MI.getOperand(0).getReg() != Reg ||
|
|
|
|
MI.getOperand(1).getReg() != Reg ||
|
2016-02-23 10:46:52 +08:00
|
|
|
getInstrPredicate(MI, MIPredReg) != Pred ||
|
2015-07-21 08:19:01 +08:00
|
|
|
MIPredReg != PredReg)
|
|
|
|
return 0;
|
|
|
|
|
2016-07-09 04:21:17 +08:00
|
|
|
if (CheckCPSRDef && definesCPSR(MI))
|
2015-07-21 08:19:01 +08:00
|
|
|
return 0;
|
|
|
|
return MI.getOperand(2).getImm() * Scale;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Searches for an increment or decrement of \p Reg before \p MBBI.
|
|
|
|
static MachineBasicBlock::iterator
|
|
|
|
findIncDecBefore(MachineBasicBlock::iterator MBBI, unsigned Reg,
|
|
|
|
ARMCC::CondCodes Pred, unsigned PredReg, int &Offset) {
|
|
|
|
Offset = 0;
|
|
|
|
MachineBasicBlock &MBB = *MBBI->getParent();
|
|
|
|
MachineBasicBlock::iterator BeginMBBI = MBB.begin();
|
|
|
|
MachineBasicBlock::iterator EndMBBI = MBB.end();
|
|
|
|
if (MBBI == BeginMBBI)
|
|
|
|
return EndMBBI;
|
|
|
|
|
|
|
|
// Skip debug values.
|
|
|
|
MachineBasicBlock::iterator PrevMBBI = std::prev(MBBI);
|
2018-05-09 10:42:00 +08:00
|
|
|
while (PrevMBBI->isDebugInstr() && PrevMBBI != BeginMBBI)
|
2015-07-21 08:19:01 +08:00
|
|
|
--PrevMBBI;
|
|
|
|
|
|
|
|
Offset = isIncrementOrDecrement(*PrevMBBI, Reg, Pred, PredReg);
|
|
|
|
return Offset == 0 ? EndMBBI : PrevMBBI;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Searches for a increment or decrement of \p Reg after \p MBBI.
|
|
|
|
static MachineBasicBlock::iterator
|
|
|
|
findIncDecAfter(MachineBasicBlock::iterator MBBI, unsigned Reg,
|
|
|
|
ARMCC::CondCodes Pred, unsigned PredReg, int &Offset) {
|
|
|
|
Offset = 0;
|
|
|
|
MachineBasicBlock &MBB = *MBBI->getParent();
|
|
|
|
MachineBasicBlock::iterator EndMBBI = MBB.end();
|
|
|
|
MachineBasicBlock::iterator NextMBBI = std::next(MBBI);
|
|
|
|
// Skip debug values.
|
2018-05-09 10:42:00 +08:00
|
|
|
while (NextMBBI != EndMBBI && NextMBBI->isDebugInstr())
|
2015-07-21 08:19:01 +08:00
|
|
|
++NextMBBI;
|
|
|
|
if (NextMBBI == EndMBBI)
|
|
|
|
return EndMBBI;
|
|
|
|
|
|
|
|
Offset = isIncrementOrDecrement(*NextMBBI, Reg, Pred, PredReg);
|
|
|
|
return Offset == 0 ? EndMBBI : NextMBBI;
|
|
|
|
}
|
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// Fold proceeding/trailing inc/dec of base register into the
/// LDM/STM/VLDM{D|S}/VSTM{D|S} op when possible:
///
/// stmia rn, <ra, rb, rc>
/// rn := rn + 4 * 3;
/// =>
/// stmia rn!, <ra, rb, rc>
///
/// rn := rn - 4 * 3;
/// ldmia rn, <ra, rb, rc>
/// =>
/// ldmdb rn!, <ra, rb, rc>
///
/// Returns true if \p MI was replaced by an updating (writeback) form; the
/// merged add/sub instruction (if any) and \p MI itself are erased.
bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(MachineInstr *MI) {
  // Thumb1 is already using updating loads/stores.
  if (isThumb1) return false;

  // Operand 0 of a non-updating LDM/STM is the base register.
  const MachineOperand &BaseOP = MI->getOperand(0);
  unsigned Base = BaseOP.getReg();
  bool BaseKill = BaseOP.isKill();
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  unsigned Opcode = MI->getOpcode();
  DebugLoc DL = MI->getDebugLoc();

  // Can't use an updating ld/st if the base register is also a dest
  // register. e.g. ldmdb r0!, {r0, r1, r2}. The behavior is undefined.
  for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
    if (MI->getOperand(i).getReg() == Base)
      return false;

  int Bytes = getLSMultipleTransferSize(MI);
  MachineBasicBlock &MBB = *MI->getParent();
  MachineBasicBlock::iterator MBBI(MI);
  int Offset;
  // First try to merge a decrement that appears *before* the instruction;
  // that turns an ia/ib mode into its "decrement before/after" counterpart.
  MachineBasicBlock::iterator MergeInstr
    = findIncDecBefore(MBBI, Base, Pred, PredReg, Offset);
  ARM_AM::AMSubMode Mode = getLoadStoreMultipleSubMode(Opcode);
  if (Mode == ARM_AM::ia && Offset == -Bytes) {
    Mode = ARM_AM::db;
  } else if (Mode == ARM_AM::ib && Offset == -Bytes) {
    Mode = ARM_AM::da;
  } else {
    // Otherwise look for a matching increment/decrement *after* it, which
    // keeps the addressing mode but adds writeback.
    MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
    if (((Mode != ARM_AM::ia && Mode != ARM_AM::ib) || Offset != Bytes) &&
        ((Mode != ARM_AM::da && Mode != ARM_AM::db) || Offset != -Bytes)) {

      // We couldn't find an inc/dec to merge. But if the base is dead, we
      // can still change to a writeback form as that will save us 2 bytes
      // of code size. It can create WAW hazards though, so only do it if
      // we're minimizing code size.
      if (!STI->optForMinSize() || !BaseKill)
        return false;

      // The 16-bit encoding (which gives the size win) only covers low
      // registers; bail out if any transferred register is r8 or above.
      bool HighRegsUsed = false;
      for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i)
        if (MI->getOperand(i).getReg() >= ARM::R8) {
          HighRegsUsed = true;
          break;
        }

      if (!HighRegsUsed)
        MergeInstr = MBB.end();  // No inc/dec to erase below.
      else
        return false;
    }
  }
  // Erase the merged add/sub (if one was found) before building the new op.
  if (MergeInstr != MBB.end())
    MBB.erase(MergeInstr);

  unsigned NewOpc = getUpdatingLSMultipleOpcode(Opcode, Mode);
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
    .addReg(Base, getDefRegState(true)) // WB base register
    .addReg(Base, getKillRegState(BaseKill))
    .addImm(Pred).addReg(PredReg);

  // Transfer the rest of operands.
  for (unsigned OpNum = 3, e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.add(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB.setMemRefs(MI->memoperands());

  MBB.erase(MBBI);
  return true;
}
|
|
|
|
|
2010-11-16 09:16:36 +08:00
|
|
|
static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc,
|
|
|
|
ARM_AM::AddrOpc Mode) {
|
2007-01-19 15:51:42 +08:00
|
|
|
switch (Opc) {
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::LDRi12:
|
2011-08-27 04:43:14 +08:00
|
|
|
return ARM::LDR_PRE_IMM;
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::STRi12:
|
2011-07-27 04:54:26 +08:00
|
|
|
return ARM::STR_PRE_IMM;
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::VLDRS:
|
|
|
|
return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
|
|
|
|
case ARM::VLDRD:
|
|
|
|
return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
|
|
|
|
case ARM::VSTRS:
|
|
|
|
return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
|
|
|
|
case ARM::VSTRD:
|
|
|
|
return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
|
2009-07-10 07:11:34 +08:00
|
|
|
case ARM::t2LDRi8:
|
|
|
|
case ARM::t2LDRi12:
|
|
|
|
return ARM::t2LDR_PRE;
|
|
|
|
case ARM::t2STRi8:
|
|
|
|
case ARM::t2STRi12:
|
|
|
|
return ARM::t2STR_PRE;
|
2009-07-15 00:55:14 +08:00
|
|
|
default: llvm_unreachable("Unhandled opcode!");
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-11-16 09:16:36 +08:00
|
|
|
static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc,
|
|
|
|
ARM_AM::AddrOpc Mode) {
|
2007-01-19 15:51:42 +08:00
|
|
|
switch (Opc) {
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::LDRi12:
|
2011-07-27 04:54:26 +08:00
|
|
|
return ARM::LDR_POST_IMM;
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::STRi12:
|
2011-07-27 04:54:26 +08:00
|
|
|
return ARM::STR_POST_IMM;
|
2010-11-16 09:16:36 +08:00
|
|
|
case ARM::VLDRS:
|
|
|
|
return Mode == ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
|
|
|
|
case ARM::VLDRD:
|
|
|
|
return Mode == ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
|
|
|
|
case ARM::VSTRS:
|
|
|
|
return Mode == ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
|
|
|
|
case ARM::VSTRD:
|
|
|
|
return Mode == ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
|
2009-07-10 07:11:34 +08:00
|
|
|
case ARM::t2LDRi8:
|
|
|
|
case ARM::t2LDRi12:
|
|
|
|
return ARM::t2LDR_POST;
|
|
|
|
case ARM::t2STRi8:
|
|
|
|
case ARM::t2STRi12:
|
|
|
|
return ARM::t2STR_POST;
|
2009-07-15 00:55:14 +08:00
|
|
|
default: llvm_unreachable("Unhandled opcode!");
|
2007-01-19 15:51:42 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// Fold proceeding/trailing inc/dec of base register into the
/// LDR/STR/FLD{D|S}/FST{D|S} op when possible:
///
/// Returns true if \p MI was rewritten to a pre- or post-indexed form; the
/// merged add/sub instruction and \p MI itself are erased on success.
bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(MachineInstr *MI) {
  // Thumb1 doesn't have updating LDR/STR.
  // FIXME: Use LDM/STM with single register instead.
  if (isThumb1) return false;

  unsigned Base = getLoadStoreBaseOp(*MI).getReg();
  bool BaseKill = getLoadStoreBaseOp(*MI).isKill();
  unsigned Opcode = MI->getOpcode();
  DebugLoc DL = MI->getDebugLoc();
  // isAM5: VFP loads/stores (addressing mode 5); isAM2: plain ARM LDR/STR
  // (addressing mode 2). Everything else handled here is Thumb2 i8/i12.
  bool isAM5 = (Opcode == ARM::VLDRD || Opcode == ARM::VLDRS ||
                Opcode == ARM::VSTRD || Opcode == ARM::VSTRS);
  bool isAM2 = (Opcode == ARM::LDRi12 || Opcode == ARM::STRi12);
  // Only a zero immediate offset can be folded away into writeback.
  if (isi32Load(Opcode) || isi32Store(Opcode))
    if (MI->getOperand(2).getImm() != 0)
      return false;
  if (isAM5 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0)
    return false;

  // Can't do the merge if the destination register is the same as the would-be
  // writeback register.
  if (MI->getOperand(0).getReg() == Base)
    return false;

  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);
  int Bytes = getLSMultipleTransferSize(MI);
  MachineBasicBlock &MBB = *MI->getParent();
  MachineBasicBlock::iterator MBBI(MI);
  int Offset;
  // An inc/dec before the access becomes pre-indexed; one after it becomes
  // post-indexed.
  MachineBasicBlock::iterator MergeInstr
    = findIncDecBefore(MBBI, Base, Pred, PredReg, Offset);
  unsigned NewOpc;
  if (!isAM5 && Offset == Bytes) {
    NewOpc = getPreIndexedLoadStoreOpcode(Opcode, ARM_AM::add);
  } else if (Offset == -Bytes) {
    NewOpc = getPreIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
  } else {
    MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
    if (Offset == Bytes) {
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::add);
    } else if (!isAM5 && Offset == -Bytes) {
      NewOpc = getPostIndexedLoadStoreOpcode(Opcode, ARM_AM::sub);
    } else
      return false;
  }
  // The add/sub is now folded into the new opcode; remove it.
  MBB.erase(MergeInstr);

  ARM_AM::AddrOpc AddSub = Offset < 0 ? ARM_AM::sub : ARM_AM::add;

  bool isLd = isLoadSingle(Opcode);
  if (isAM5) {
    // VLDM[SD]_UPD, VSTM[SD]_UPD
    // (There are no base-updating versions of VLDR/VSTR instructions, but the
    // updating load/store-multiple instructions can be used with only one
    // register.)
    MachineOperand &MO = MI->getOperand(0);
    BuildMI(MBB, MBBI, DL, TII->get(NewOpc))
      .addReg(Base, getDefRegState(true)) // WB base register
      .addReg(Base, getKillRegState(isLd ? BaseKill : false))
      .addImm(Pred).addReg(PredReg)
      .addReg(MO.getReg(), (isLd ? getDefRegState(true) :
                            getKillRegState(MO.isKill())));
  } else if (isLd) {
    if (isAM2) {
      // LDR_PRE, LDR_POST
      if (NewOpc == ARM::LDR_PRE_IMM || NewOpc == ARM::LDRB_PRE_IMM) {
        BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
          .addReg(Base, RegState::Define)
          .addReg(Base).addImm(Offset).addImm(Pred).addReg(PredReg);
      } else {
        // Post-indexed forms take an am2offset_imm operand (reg + encoded
        // imm) rather than a plain immediate.
        int Imm = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
        BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
            .addReg(Base, RegState::Define)
            .addReg(Base)
            .addReg(0)
            .addImm(Imm)
            .add(predOps(Pred, PredReg));
      }
    } else {
      // t2LDR_PRE, t2LDR_POST
      BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg())
          .addReg(Base, RegState::Define)
          .addReg(Base)
          .addImm(Offset)
          .add(predOps(Pred, PredReg));
    }
  } else {
    MachineOperand &MO = MI->getOperand(0);
    // FIXME: post-indexed stores use am2offset_imm, which still encodes
    // the vestigal zero-reg offset register. When that's fixed, this clause
    // can be removed entirely.
    if (isAM2 && NewOpc == ARM::STR_POST_IMM) {
      int Imm = ARM_AM::getAM2Opc(AddSub, Bytes, ARM_AM::no_shift);
      // STR_PRE, STR_POST
      BuildMI(MBB, MBBI, DL, TII->get(NewOpc), Base)
          .addReg(MO.getReg(), getKillRegState(MO.isKill()))
          .addReg(Base)
          .addReg(0)
          .addImm(Imm)
          .add(predOps(Pred, PredReg));
    } else {
      // t2STR_PRE, t2STR_POST
      BuildMI(MBB, MBBI, DL, TII->get(NewOpc), Base)
          .addReg(MO.getReg(), getKillRegState(MO.isKill()))
          .addReg(Base)
          .addImm(Offset)
          .add(predOps(Pred, PredReg));
    }
  }
  MBB.erase(MBBI);

  return true;
}
|
|
|
|
|
2015-07-21 08:19:01 +08:00
|
|
|
/// Try to fold a preceding/trailing add/sub of the base register into a
/// t2LDRDi8/t2STRDi8, producing the pre- or post-indexed t2LDRD/t2STRD form.
/// Returns true (and erases both the add/sub and the original instruction)
/// on success.
bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  assert((Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8) &&
         "Must have t2STRDi8 or t2LDRDi8");
  // Only a zero immediate offset can be converted to writeback.
  if (MI.getOperand(3).getImm() != 0)
    return false;

  // Behaviour for writeback is undefined if base register is the same as one
  // of the others.
  const MachineOperand &BaseOp = MI.getOperand(2);
  unsigned Base = BaseOp.getReg();
  const MachineOperand &Reg0Op = MI.getOperand(0);
  const MachineOperand &Reg1Op = MI.getOperand(1);
  if (Reg0Op.getReg() == Base || Reg1Op.getReg() == Base)
    return false;

  unsigned PredReg;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  MachineBasicBlock::iterator MBBI(MI);
  MachineBasicBlock &MBB = *MI.getParent();
  int Offset;
  // An inc/dec of 8 bytes (the LDRD/STRD transfer size) before the access
  // becomes pre-indexed; after it, post-indexed.
  MachineBasicBlock::iterator MergeInstr = findIncDecBefore(MBBI, Base, Pred,
                                                            PredReg, Offset);
  unsigned NewOpc;
  if (Offset == 8 || Offset == -8) {
    NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_PRE : ARM::t2STRD_PRE;
  } else {
    MergeInstr = findIncDecAfter(MBBI, Base, Pred, PredReg, Offset);
    if (Offset == 8 || Offset == -8) {
      NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_POST : ARM::t2STRD_POST;
    } else
      return false;
  }
  MBB.erase(MergeInstr);

  DebugLoc DL = MI.getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
  // Loads define the two data registers first, then the writeback base;
  // stores define the writeback base first, then use the data registers.
  if (NewOpc == ARM::t2LDRD_PRE || NewOpc == ARM::t2LDRD_POST) {
    MIB.add(Reg0Op).add(Reg1Op).addReg(BaseOp.getReg(), RegState::Define);
  } else {
    assert(NewOpc == ARM::t2STRD_PRE || NewOpc == ARM::t2STRD_POST);
    MIB.addReg(BaseOp.getReg(), RegState::Define).add(Reg0Op).add(Reg1Op);
  }
  MIB.addReg(BaseOp.getReg(), RegState::Kill)
     .addImm(Offset).addImm(Pred).addReg(PredReg);
  assert(TII->get(Opcode).getNumOperands() == 6 &&
         TII->get(NewOpc).getNumOperands() == 7 &&
         "Unexpected number of operands in Opcode specification.");

  // Transfer implicit operands.
  for (const MachineOperand &MO : MI.implicit_operands())
    MIB.add(MO);
  MIB.setMemRefs(MI.memoperands());

  MBB.erase(MBBI);
  return true;
}
|
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// Returns true if instruction is a memory operation that this pass is capable
|
|
|
|
/// of operating on.
|
2015-11-21 10:09:49 +08:00
|
|
|
static bool isMemoryOp(const MachineInstr &MI) {
|
|
|
|
unsigned Opcode = MI.getOpcode();
|
|
|
|
switch (Opcode) {
|
|
|
|
case ARM::VLDRS:
|
|
|
|
case ARM::VSTRS:
|
|
|
|
case ARM::VLDRD:
|
|
|
|
case ARM::VSTRD:
|
|
|
|
case ARM::LDRi12:
|
|
|
|
case ARM::STRi12:
|
|
|
|
case ARM::tLDRi:
|
|
|
|
case ARM::tSTRi:
|
|
|
|
case ARM::tLDRspi:
|
|
|
|
case ARM::tSTRspi:
|
|
|
|
case ARM::t2LDRi8:
|
|
|
|
case ARM::t2LDRi12:
|
|
|
|
case ARM::t2STRi8:
|
|
|
|
case ARM::t2STRi12:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (!MI.getOperand(1).isReg())
|
|
|
|
return false;
|
|
|
|
|
2010-06-29 09:13:07 +08:00
|
|
|
// When no memory operands are present, conservatively assume unaligned,
|
|
|
|
// volatile, unfoldable.
|
2015-11-21 10:09:49 +08:00
|
|
|
if (!MI.hasOneMemOperand())
|
2010-06-29 09:13:07 +08:00
|
|
|
return false;
|
2010-01-14 08:54:10 +08:00
|
|
|
|
2015-11-21 10:09:49 +08:00
|
|
|
const MachineMemOperand &MMO = **MI.memoperands_begin();
|
2010-01-14 08:54:10 +08:00
|
|
|
|
2010-06-29 09:13:07 +08:00
|
|
|
// Don't touch volatile memory accesses - we may be changing their order.
|
2019-02-26 12:30:33 +08:00
|
|
|
// TODO: We could allow unordered and monotonic atomics here, but we need to
|
|
|
|
// make sure the resulting ldm/stm is correctly marked as atomic.
|
|
|
|
if (MMO.isVolatile() || MMO.isAtomic())
|
2010-06-29 09:13:07 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
// Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
|
|
|
|
// not.
|
2015-11-21 10:09:49 +08:00
|
|
|
if (MMO.getAlignment() < 4)
|
2010-06-29 09:13:07 +08:00
|
|
|
return false;
|
2010-01-14 08:54:10 +08:00
|
|
|
|
2010-02-25 02:57:08 +08:00
|
|
|
// str <undef> could probably be eliminated entirely, but for now we just want
|
|
|
|
// to avoid making a mess of it.
|
|
|
|
// FIXME: Use str <undef> as a wildcard to enable better stm folding.
|
2015-11-21 10:09:49 +08:00
|
|
|
if (MI.getOperand(0).isReg() && MI.getOperand(0).isUndef())
|
2010-02-25 02:57:08 +08:00
|
|
|
return false;
|
|
|
|
|
2010-03-05 05:04:38 +08:00
|
|
|
// Likewise don't mess with references to undefined addresses.
|
2015-11-21 10:09:49 +08:00
|
|
|
if (MI.getOperand(1).isUndef())
|
2010-03-05 05:04:38 +08:00
|
|
|
return false;
|
|
|
|
|
2015-11-21 10:09:49 +08:00
|
|
|
return true;
|
2007-03-07 02:02:41 +08:00
|
|
|
}
|
|
|
|
|
2009-06-15 16:28:29 +08:00
|
|
|
static void InsertLDR_STR(MachineBasicBlock &MBB,
|
2016-06-12 23:39:02 +08:00
|
|
|
MachineBasicBlock::iterator &MBBI, int Offset,
|
2017-08-29 03:03:45 +08:00
|
|
|
bool isDef, unsigned NewOpc, unsigned Reg,
|
|
|
|
bool RegDeadKill, bool RegUndef, unsigned BaseReg,
|
|
|
|
bool BaseKill, bool BaseUndef, ARMCC::CondCodes Pred,
|
|
|
|
unsigned PredReg, const TargetInstrInfo *TII) {
|
2009-09-27 17:46:04 +08:00
|
|
|
if (isDef) {
|
|
|
|
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
|
|
|
|
TII->get(NewOpc))
|
2009-06-19 09:59:04 +08:00
|
|
|
.addReg(Reg, getDefRegState(true) | getDeadRegState(RegDeadKill))
|
2009-09-27 17:46:04 +08:00
|
|
|
.addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
|
|
|
|
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
|
|
|
|
} else {
|
|
|
|
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(),
|
|
|
|
TII->get(NewOpc))
|
|
|
|
.addReg(Reg, getKillRegState(RegDeadKill) | getUndefRegState(RegUndef))
|
|
|
|
.addReg(BaseReg, getKillRegState(BaseKill)|getUndefRegState(BaseUndef));
|
|
|
|
MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
|
|
|
|
}
|
2009-06-15 16:28:29 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Rewrite an LDRD/STRD that uses an invalid register pair (non-consecutive
/// or errata-affected) into either a 2-register LDM/STM or two single
/// loads/stores. Returns true if \p MBBI was replaced; the iterator is
/// advanced past the erased instruction.
bool ARMLoadStoreOpt::FixInvalidRegPairOp(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator &MBBI) {
  MachineInstr *MI = &*MBBI;
  unsigned Opcode = MI->getOpcode();
  // FIXME: Code/comments below check Opcode == t2STRDi8, but this check returns
  // if we see this opcode.
  if (Opcode != ARM::LDRD && Opcode != ARM::STRD && Opcode != ARM::t2LDRDi8)
    return false;

  const MachineOperand &BaseOp = MI->getOperand(2);
  unsigned BaseReg = BaseOp.getReg();
  unsigned EvenReg = MI->getOperand(0).getReg();
  unsigned OddReg = MI->getOperand(1).getReg();
  // Dwarf numbers are used to test register-pair consecutiveness/parity.
  unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false);
  unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false);

  // ARM errata 602117: LDRD with base in list may result in incorrect base
  // register when interrupted or faulted.
  bool Errata602117 = EvenReg == BaseReg &&
    (Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8) && STI->isCortexM3();
  // ARM LDRD/STRD needs consecutive registers.
  bool NonConsecutiveRegs = (Opcode == ARM::LDRD || Opcode == ARM::STRD) &&
    (EvenRegNum % 2 != 0 || EvenRegNum + 1 != OddRegNum);

  if (!Errata602117 && !NonConsecutiveRegs)
    return false;

  bool isT2 = Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8;
  bool isLd = Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8;
  // For loads the data operands are defs (dead flag); for stores they are
  // uses (kill flag).
  bool EvenDeadKill = isLd ?
    MI->getOperand(0).isDead() : MI->getOperand(0).isKill();
  bool EvenUndef = MI->getOperand(0).isUndef();
  bool OddDeadKill  = isLd ?
    MI->getOperand(1).isDead() : MI->getOperand(1).isKill();
  bool OddUndef = MI->getOperand(1).isUndef();
  bool BaseKill = BaseOp.isKill();
  bool BaseUndef = BaseOp.isUndef();
  assert((isT2 || MI->getOperand(3).getReg() == ARM::NoRegister) &&
         "register offset not handled below");
  int OffImm = getMemoryOpOffset(*MI);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(*MI, PredReg);

  if (OddRegNum > EvenRegNum && OffImm == 0) {
    // Ascending register numbers and no offset. It's safe to change it to a
    // ldm or stm.
    unsigned NewOpc = (isLd)
      ? (isT2 ? ARM::t2LDMIA : ARM::LDMIA)
      : (isT2 ? ARM::t2STMIA : ARM::STMIA);
    if (isLd) {
      BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
        .addReg(BaseReg, getKillRegState(BaseKill))
        .addImm(Pred).addReg(PredReg)
        .addReg(EvenReg, getDefRegState(isLd) | getDeadRegState(EvenDeadKill))
        .addReg(OddReg, getDefRegState(isLd) | getDeadRegState(OddDeadKill));
      ++NumLDRD2LDM;
    } else {
      BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc))
        .addReg(BaseReg, getKillRegState(BaseKill))
        .addImm(Pred).addReg(PredReg)
        .addReg(EvenReg,
                getKillRegState(EvenDeadKill) | getUndefRegState(EvenUndef))
        .addReg(OddReg,
                getKillRegState(OddDeadKill) | getUndefRegState(OddUndef));
      ++NumSTRD2STM;
    }
  } else {
    // Split into two instructions.
    unsigned NewOpc = (isLd)
      ? (isT2 ? (OffImm < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
      : (isT2 ? (OffImm < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
    // Be extra careful for thumb2. t2LDRi8 can't reference a zero offset,
    // so adjust and use t2LDRi12 here for that.
    unsigned NewOpc2 = (isLd)
      ? (isT2 ? (OffImm+4 < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
      : (isT2 ? (OffImm+4 < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
    // If this is a load, make sure the first load does not clobber the base
    // register before the second load reads it.
    if (isLd && TRI->regsOverlap(EvenReg, BaseReg)) {
      assert(!TRI->regsOverlap(OddReg, BaseReg));
      // Emit the odd (base-independent) half first so the base survives.
      InsertLDR_STR(MBB, MBBI, OffImm + 4, isLd, NewOpc2, OddReg, OddDeadKill,
                    false, BaseReg, false, BaseUndef, Pred, PredReg, TII);
      InsertLDR_STR(MBB, MBBI, OffImm, isLd, NewOpc, EvenReg, EvenDeadKill,
                    false, BaseReg, BaseKill, BaseUndef, Pred, PredReg, TII);
    } else {
      if (OddReg == EvenReg && EvenDeadKill) {
        // If the two source operands are the same, the kill marker is
        // probably on the first one. e.g.
        // t2STRDi8 killed %r5, %r5, killed %r9, 0, 14, %reg0
        EvenDeadKill = false;
        OddDeadKill = true;
      }
      // Never kill the base register in the first instruction.
      if (EvenReg == BaseReg)
        EvenDeadKill = false;
      InsertLDR_STR(MBB, MBBI, OffImm, isLd, NewOpc, EvenReg, EvenDeadKill,
                    EvenUndef, BaseReg, false, BaseUndef, Pred, PredReg, TII);
      InsertLDR_STR(MBB, MBBI, OffImm + 4, isLd, NewOpc2, OddReg, OddDeadKill,
                    OddUndef, BaseReg, BaseKill, BaseUndef, Pred, PredReg, TII);
    }
    if (isLd)
      ++NumLDRD2LDR;
    else
      ++NumSTRD2STR;
  }

  MBBI = MBB.erase(MBBI);
  return true;
}
|
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// An optimization pass to turn multiple LDR / STR ops of the same base and
/// incrementing offset into LDM / STM ops.
///
/// Walks \p MBB backwards collecting chains of compatible loads/stores
/// (same opcode, base register and predicate), forms merge candidates from
/// each chain, then performs the merges and folds base-register updates.
/// Returns true if any instruction was changed.
bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) {
  MemOpQueue MemOps;
  // State of the chain currently being accumulated. CurrBase == 0 means no
  // chain is open.
  unsigned CurrBase = 0;
  unsigned CurrOpc = ~0u;
  ARMCC::CondCodes CurrPred = ARMCC::AL;
  unsigned Position = 0;
  assert(Candidates.size() == 0);
  assert(MergeBaseCandidates.size() == 0);
  LiveRegsValid = false;

  for (MachineBasicBlock::iterator I = MBB.end(), MBBI; I != MBB.begin();
       I = MBBI) {
    // The instruction in front of the iterator is the one we look at.
    MBBI = std::prev(I);
    if (FixInvalidRegPairOp(MBB, MBBI))
      continue;
    ++Position;

    if (isMemoryOp(*MBBI)) {
      unsigned Opcode = MBBI->getOpcode();
      const MachineOperand &MO = MBBI->getOperand(0);
      unsigned Reg = MO.getReg();
      unsigned Base = getLoadStoreBaseOp(*MBBI).getReg();
      unsigned PredReg = 0;
      ARMCC::CondCodes Pred = getInstrPredicate(*MBBI, PredReg);
      int Offset = getMemoryOpOffset(*MBBI);
      if (CurrBase == 0) {
        // Start of a new chain.
        CurrBase = Base;
        CurrOpc = Opcode;
        CurrPred = Pred;
        MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
        continue;
      }
      // Note: No need to match PredReg in the next if.
      if (CurrOpc == Opcode && CurrBase == Base && CurrPred == Pred) {
        // Watch out for:
        //   r4 := ldr [r0, #8]
        //   r4 := ldr [r0, #4]
        // or
        //   r0 := ldr [r0]
        // If a load overrides the base register or a register loaded by
        // another load in our chain, we cannot take this instruction.
        bool Overlap = false;
        if (isLoadSingle(Opcode)) {
          Overlap = (Base == Reg);
          if (!Overlap) {
            for (const MemOpQueueEntry &E : MemOps) {
              if (TRI->regsOverlap(Reg, E.MI->getOperand(0).getReg())) {
                Overlap = true;
                break;
              }
            }
          }
        }

        if (!Overlap) {
          // Check offset and sort memory operation into the current chain.
          // MemOps is kept sorted by ascending offset; duplicates abort.
          if (Offset > MemOps.back().Offset) {
            MemOps.push_back(MemOpQueueEntry(*MBBI, Offset, Position));
            continue;
          } else {
            MemOpQueue::iterator MI, ME;
            for (MI = MemOps.begin(), ME = MemOps.end(); MI != ME; ++MI) {
              if (Offset < MI->Offset) {
                // Found a place to insert.
                break;
              }
              if (Offset == MI->Offset) {
                // Collision, abort.
                MI = ME;
                break;
              }
            }
            if (MI != MemOps.end()) {
              MemOps.insert(MI, MemOpQueueEntry(*MBBI, Offset, Position));
              continue;
            }
          }
        }
      }

      // Don't advance the iterator; The op will start a new chain next.
      MBBI = I;
      --Position;
      // Fallthrough to look into existing chain.
    } else if (MBBI->isDebugInstr()) {
      // Debug values neither break nor extend a chain.
      continue;
    } else if (MBBI->getOpcode() == ARM::t2LDRDi8 ||
               MBBI->getOpcode() == ARM::t2STRDi8) {
      // ARMPreAllocLoadStoreOpt has already formed some LDRD/STRD instructions
      // remember them because we may still be able to merge add/sub into them.
      MergeBaseCandidates.push_back(&*MBBI);
    }

    // If we are here then the chain is broken; Extract candidates for a merge.
    if (MemOps.size() > 0) {
      FormCandidates(MemOps);
      // Reset for the next chain.
      CurrBase = 0;
      CurrOpc = ~0u;
      CurrPred = ARMCC::AL;
      MemOps.clear();
    }
  }
  // Flush the chain that reached the start of the block.
  if (MemOps.size() > 0)
    FormCandidates(MemOps);

  // Sort candidates so they get processed from end to begin of the basic
  // block later; This is necessary for liveness calculation.
  auto LessThan = [](const MergeCandidate* M0, const MergeCandidate *M1) {
    return M0->InsertPos < M1->InsertPos;
  };
  llvm::sort(Candidates, LessThan);

  // Go through list of candidates and merge.
  bool Changed = false;
  for (const MergeCandidate *Candidate : Candidates) {
    if (Candidate->CanMergeToLSMulti || Candidate->CanMergeToLSDouble) {
      MachineInstr *Merged = MergeOpsUpdate(*Candidate);
      // Merge preceding/trailing base inc/dec into the merged op.
      if (Merged) {
        Changed = true;
        unsigned Opcode = Merged->getOpcode();
        if (Opcode == ARM::t2STRDi8 || Opcode == ARM::t2LDRDi8)
          MergeBaseUpdateLSDouble(*Merged);
        else
          MergeBaseUpdateLSMultiple(Merged);
      } else {
        // Merging failed; still try to fold base updates into the
        // individual loads/stores.
        for (MachineInstr *MI : Candidate->Instrs) {
          if (MergeBaseUpdateLoadStore(MI))
            Changed = true;
        }
      }
    } else {
      assert(Candidate->Instrs.size() == 1);
      if (MergeBaseUpdateLoadStore(Candidate->Instrs.front()))
        Changed = true;
    }
  }
  Candidates.clear();
  // Try to fold add/sub into the LDRD/STRD formed by ARMPreAllocLoadStoreOpt.
  for (MachineInstr *MI : MergeBaseCandidates)
    MergeBaseUpdateLSDouble(*MI);
  MergeBaseCandidates.clear();

  return Changed;
}
|
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// If this is a exit BB, try merging the return ops ("bx lr" and "mov pc, lr")
|
|
|
|
/// into the preceding stack restore so it directly restore the value of LR
|
|
|
|
/// into pc.
|
2010-03-21 06:20:40 +08:00
|
|
|
/// ldmfd sp!, {..., lr}
|
2007-01-19 15:51:42 +08:00
|
|
|
/// bx lr
|
2010-03-21 06:20:40 +08:00
|
|
|
/// or
|
|
|
|
/// ldmfd sp!, {..., lr}
|
|
|
|
/// mov pc, lr
|
2007-01-19 15:51:42 +08:00
|
|
|
/// =>
|
2010-03-21 06:20:40 +08:00
|
|
|
/// ldmfd sp!, {..., pc}
|
2007-01-19 15:51:42 +08:00
|
|
|
bool ARMLoadStoreOpt::MergeReturnIntoLDM(MachineBasicBlock &MBB) {
  // Thumb1 LDM doesn't allow high registers.
  if (isThumb1) return false;
  if (MBB.empty()) return false;

  // Look at the last real (non-debug) instruction: it must be one of the
  // return forms we know how to fold (bx lr / mov pc, lr).
  MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
  if (MBBI != MBB.begin() && MBBI != MBB.end() &&
      (MBBI->getOpcode() == ARM::BX_RET ||
       MBBI->getOpcode() == ARM::tBX_RET ||
       MBBI->getOpcode() == ARM::MOVPCLR)) {
    MachineBasicBlock::iterator PrevI = std::prev(MBBI);
    // Ignore any debug instructions.
    while (PrevI->isDebugInstr() && PrevI != MBB.begin())
      --PrevI;
    MachineInstr &PrevMI = *PrevI;
    unsigned Opcode = PrevMI.getOpcode();
    // The instruction preceding the return must be a writeback multiple-load
    // (LDM..._UPD / t2LDM..._UPD) for the fold to apply.
    if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
        Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
        Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
      // The register list is at the end of the operand list; the last operand
      // must be LR for this to be a stack restore of the return address.
      MachineOperand &MO = PrevMI.getOperand(PrevMI.getNumOperands() - 1);
      if (MO.getReg() != ARM::LR)
        return false;
      unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
      assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
              Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!");
      // Rewrite the LDM into its _RET form, retarget the popped register from
      // LR to PC, and delete the now-redundant return instruction.
      PrevMI.setDesc(TII->get(NewOpc));
      MO.setReg(ARM::PC);
      PrevMI.copyImplicitOps(*MBB.getParent(), *MBBI);
      MBB.erase(MBBI);
      // We now restore LR into PC so it is not live-out of the return block
      // anymore: Clear the CSI Restored bit.
      MachineFrameInfo &MFI = MBB.getParent()->getFrameInfo();
      // CSI should be fixed after PrologEpilog Insertion
      assert(MFI.isCalleeSavedInfoValid() && "CSI should be valid");
      for (CalleeSavedInfo &Info : MFI.getCalleeSavedInfo()) {
        if (Info.getReg() == ARM::LR) {
          Info.setRestored(false);
          break;
        }
      }
      return true;
    }
  }
  return false;
}
|
|
|
|
|
2015-12-29 05:40:45 +08:00
|
|
|
/// Fold "mov lr, rX; bx lr" (Thumb1) into a single "bx rX". The tMOVr must
/// kill its source register, otherwise the transformation is unsound and we
/// hit the unreachable below.
bool ARMLoadStoreOpt::CombineMovBx(MachineBasicBlock &MBB) {
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
  if (MBBI == MBB.begin() || MBBI == MBB.end() ||
      MBBI->getOpcode() != ARM::tBX_RET)
    return false;

  // The instruction immediately before the return must be a tMOVr that
  // defines LR (i.e. "mov lr, rX").
  MachineBasicBlock::iterator Prev = std::prev(MBBI);
  if (Prev->getOpcode() != ARM::tMOVr || !Prev->definesRegister(ARM::LR))
    return false;

  // Find the killed source operand of the tMOVr; take it by const reference
  // to avoid copying each MachineOperand per iteration.
  for (const auto &Use : Prev->uses())
    if (Use.isKill()) {
      // tBX requires ARMv4T or later.
      assert(STI->hasV4TOps());
      BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::tBX))
          .addReg(Use.getReg(), RegState::Kill)
          .add(predOps(ARMCC::AL))
          .copyImplicitOps(*MBBI);
      // Remove both the return and the mov; the new tBX replaces them.
      MBB.erase(MBBI);
      MBB.erase(Prev);
      return true;
    }

  llvm_unreachable("tMOVr doesn't kill a reg before tBX_RET?");
}
|
|
|
|
|
2007-01-19 15:51:42 +08:00
|
|
|
/// Entry point for the post-RA load/store optimizer: caches target info,
/// then runs the per-block optimizations over every basic block.
bool ARMLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  // Cache commonly used target/function state for the per-block helpers.
  MF = &Fn;
  STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
  TL = STI->getTargetLowering();
  AFI = Fn.getInfo<ARMFunctionInfo>();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();

  RegClassInfoValid = false;
  isThumb2 = AFI->isThumb2Function();
  isThumb1 = AFI->isThumbFunction() && !isThumb2;

  bool Modified = false;
  // Range-for over the function, consistent with the pre-alloc pass below.
  for (MachineBasicBlock &MBB : Fn) {
    Modified |= LoadStoreMultipleOpti(MBB);
    // Folding the return into an LDM requires LDM-to-PC (ARMv5T+).
    if (STI->hasV5TOps())
      Modified |= MergeReturnIntoLDM(MBB);
    if (isThumb1)
      Modified |= CombineMovBx(MBB);
  }

  // Release all MergeCandidate storage allocated during this function.
  Allocator.DestroyAll();
  return Modified;
}
|
2009-06-13 17:12:55 +08:00
|
|
|
|
2015-09-16 21:11:31 +08:00
|
|
|
#define ARM_PREALLOC_LOAD_STORE_OPT_NAME \
|
|
|
|
"ARM pre- register allocation load / store optimization pass"
|
|
|
|
|
2009-06-13 17:12:55 +08:00
|
|
|
namespace {
|
2017-09-21 05:35:51 +08:00
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// Pre- register allocation pass that move load / stores from consecutive
|
|
|
|
/// locations close to make it more likely they will be combined later.
|
2009-10-25 14:33:48 +08:00
|
|
|
struct ARMPreAllocLoadStoreOpt : public MachineFunctionPass{
|
2009-06-13 17:12:55 +08:00
|
|
|
static char ID;
|
|
|
|
|
2017-03-17 08:34:26 +08:00
|
|
|
AliasAnalysis *AA;
|
2012-10-09 00:38:25 +08:00
|
|
|
const DataLayout *TD;
|
2009-06-13 17:12:55 +08:00
|
|
|
const TargetInstrInfo *TII;
|
|
|
|
const TargetRegisterInfo *TRI;
|
2009-06-15 16:28:29 +08:00
|
|
|
const ARMSubtarget *STI;
|
2009-06-13 17:12:55 +08:00
|
|
|
MachineRegisterInfo *MRI;
|
2009-09-26 05:44:53 +08:00
|
|
|
MachineFunction *MF;
|
2009-06-13 17:12:55 +08:00
|
|
|
|
2017-09-21 05:35:51 +08:00
|
|
|
ARMPreAllocLoadStoreOpt() : MachineFunctionPass(ID) {}
|
|
|
|
|
2014-03-10 10:09:33 +08:00
|
|
|
bool runOnMachineFunction(MachineFunction &Fn) override;
|
2009-06-13 17:12:55 +08:00
|
|
|
|
2016-10-01 10:56:57 +08:00
|
|
|
StringRef getPassName() const override {
|
2015-09-16 21:11:31 +08:00
|
|
|
return ARM_PREALLOC_LOAD_STORE_OPT_NAME;
|
2009-06-13 17:12:55 +08:00
|
|
|
}
|
|
|
|
|
2017-09-21 05:35:51 +08:00
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
2017-03-17 08:34:26 +08:00
|
|
|
AU.addRequired<AAResultsWrapperPass>();
|
|
|
|
MachineFunctionPass::getAnalysisUsage(AU);
|
|
|
|
}
|
|
|
|
|
2009-06-13 17:12:55 +08:00
|
|
|
private:
|
2009-06-16 04:54:56 +08:00
|
|
|
bool CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1, DebugLoc &dl,
|
|
|
|
unsigned &NewOpc, unsigned &EvenReg,
|
|
|
|
unsigned &OddReg, unsigned &BaseReg,
|
2010-10-28 07:12:14 +08:00
|
|
|
int &Offset,
|
2009-09-26 05:44:53 +08:00
|
|
|
unsigned &PredReg, ARMCC::CondCodes &Pred,
|
|
|
|
bool &isT2);
|
2009-06-13 17:12:55 +08:00
|
|
|
bool RescheduleOps(MachineBasicBlock *MBB,
|
2013-07-04 09:31:24 +08:00
|
|
|
SmallVectorImpl<MachineInstr *> &Ops,
|
2009-06-13 17:12:55 +08:00
|
|
|
unsigned Base, bool isLd,
|
|
|
|
DenseMap<MachineInstr*, unsigned> &MI2LocMap);
|
|
|
|
bool RescheduleLoadStoreInstrs(MachineBasicBlock *MBB);
|
|
|
|
};
|
2017-09-21 05:35:51 +08:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
|
|
|
|
|
|
|
char ARMPreAllocLoadStoreOpt::ID = 0;
|
2009-06-13 17:12:55 +08:00
|
|
|
|
2016-07-16 10:24:10 +08:00
|
|
|
INITIALIZE_PASS(ARMPreAllocLoadStoreOpt, "arm-prera-ldst-opt",
|
2015-09-16 21:11:31 +08:00
|
|
|
ARM_PREALLOC_LOAD_STORE_OPT_NAME, false, false)
|
|
|
|
|
2019-02-11 17:37:42 +08:00
|
|
|
// Limit the number of instructions to be rescheduled.
|
|
|
|
// FIXME: tune this limit, and/or come up with some better heuristics.
|
|
|
|
static cl::opt<unsigned> InstReorderLimit("arm-prera-ldst-opt-reorder-limit",
|
|
|
|
cl::init(8), cl::Hidden);
|
|
|
|
|
2009-06-13 17:12:55 +08:00
|
|
|
/// Entry point for the pre-RA rescheduling pass: cache target state and run
/// the rescheduler over each basic block of the function.
bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (AssumeMisalignedLoadStores || skipFunction(Fn.getFunction()))
    return false;

  // Cache frequently used handles for the helper methods.
  TD = &Fn.getDataLayout();
  STI = &static_cast<const ARMSubtarget &>(Fn.getSubtarget());
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  MRI = &Fn.getRegInfo();
  MF = &Fn;
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  bool Changed = false;
  for (MachineBasicBlock &Block : Fn)
    Changed |= RescheduleLoadStoreInstrs(&Block);

  return Changed;
}
|
|
|
|
|
2009-06-20 07:17:27 +08:00
|
|
|
/// Check whether the loads/stores in MemOps (all using base register Base)
/// can safely be moved next to each other within (I, E), and whether doing
/// so is unlikely to hurt register pressure too much.
static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base,
                                      MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator E,
                                      SmallPtrSetImpl<MachineInstr*> &MemOps,
                                      SmallSet<unsigned, 4> &MemRegs,
                                      const TargetRegisterInfo *TRI,
                                      AliasAnalysis *AA) {
  // Are there stores / loads / calls between them?
  SmallSet<unsigned, 4> AddedRegPressure;
  while (++I != E) {
    // Skip debug instructions and the memory ops being moved themselves.
    if (I->isDebugInstr() || MemOps.count(&*I))
      continue;
    // Barriers: calls, terminators and side-effecting instructions block
    // any reordering across them.
    if (I->isCall() || I->isTerminator() || I->hasUnmodeledSideEffects())
      return false;
    // A store (or, when moving stores, a load) in between must not alias
    // any of the memory ops we intend to move.
    if (I->mayStore() || (!isLd && I->mayLoad()))
      for (MachineInstr *MemOp : MemOps)
        if (I->mayAlias(AA, *MemOp, /*UseTBAA*/ false))
          return false;
    for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) {
      MachineOperand &MO = I->getOperand(j);
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      // Any redefinition of the base register invalidates the group.
      if (MO.isDef() && TRI->regsOverlap(Reg, Base))
        return false;
      // Track registers whose live ranges would be stretched by the move.
      if (Reg != Base && !MemRegs.count(Reg))
        AddedRegPressure.insert(Reg);
    }
  }

  // Estimate register pressure increase due to the transformation.
  if (MemRegs.size() <= 4)
    // Ok if we are moving small number of instructions.
    return true;
  return AddedRegPressure.size() <= MemRegs.size() * 2;
}
|
|
|
|
|
2009-06-16 04:54:56 +08:00
|
|
|
/// Decide whether the pair (Op0, Op1) can be combined into a single
/// LDRD/STRD. On success, fills in the new opcode, the two data registers,
/// the base register, the encoded offset, predicate info, the debug
/// location, and whether the result is a Thumb2 instruction.
bool
ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
                                          DebugLoc &dl, unsigned &NewOpc,
                                          unsigned &FirstReg,
                                          unsigned &SecondReg,
                                          unsigned &BaseReg, int &Offset,
                                          unsigned &PredReg,
                                          ARMCC::CondCodes &Pred,
                                          bool &isT2) {
  // Make sure we're allowed to generate LDRD/STRD.
  if (!STI->hasV5TEOps())
    return false;

  // FIXME: VLDRS / VSTRS -> VLDRD / VSTRD
  // Scale is the granularity of the immediate offset encoding: t2 LDRD/STRD
  // encodes the offset in words, ARM mode in bytes.
  unsigned Scale = 1;
  unsigned Opcode = Op0->getOpcode();
  if (Opcode == ARM::LDRi12) {
    NewOpc = ARM::LDRD;
  } else if (Opcode == ARM::STRi12) {
    NewOpc = ARM::STRD;
  } else if (Opcode == ARM::t2LDRi8 || Opcode == ARM::t2LDRi12) {
    NewOpc = ARM::t2LDRDi8;
    Scale = 4;
    isT2 = true;
  } else if (Opcode == ARM::t2STRi8 || Opcode == ARM::t2STRi12) {
    NewOpc = ARM::t2STRDi8;
    Scale = 4;
    isT2 = true;
  } else {
    return false;
  }

  // Make sure the base address satisfies i64 ld / st alignment requirement.
  // At the moment, we ignore the memoryoperand's value.
  // If we want to use AliasAnalysis, we should check it accordingly.
  if (!Op0->hasOneMemOperand() ||
      (*Op0->memoperands_begin())->isVolatile() ||
      (*Op0->memoperands_begin())->isAtomic())
    return false;

  unsigned Align = (*Op0->memoperands_begin())->getAlignment();
  const Function &Func = MF->getFunction();
  unsigned ReqAlign = STI->hasV6Ops()
    ? TD->getABITypeAlignment(Type::getInt64Ty(Func.getContext()))
    : 8;  // Pre-v6 need 8-byte align
  if (Align < ReqAlign)
    return false;

  // Then make sure the immediate offset fits.
  int OffImm = getMemoryOpOffset(*Op0);
  if (isT2) {
    // Thumb2 LDRD/STRD: signed 8-bit word offset, must be word-aligned.
    int Limit = (1 << 8) * Scale;
    if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1)))
      return false;
    Offset = OffImm;
  } else {
    // ARM mode uses addrmode3: unsigned 8-bit offset plus an add/sub flag,
    // packed together via getAM3Opc.
    ARM_AM::AddrOpc AddSub = ARM_AM::add;
    if (OffImm < 0) {
      AddSub = ARM_AM::sub;
      OffImm = - OffImm;
    }
    int Limit = (1 << 8) * Scale;
    if (OffImm >= Limit || (OffImm & (Scale-1)))
      return false;
    Offset = ARM_AM::getAM3Opc(AddSub, OffImm);
  }
  // LDRD/STRD needs two distinct data registers.
  FirstReg = Op0->getOperand(0).getReg();
  SecondReg = Op1->getOperand(0).getReg();
  if (FirstReg == SecondReg)
    return false;
  BaseReg = Op0->getOperand(1).getReg();
  Pred = getInstrPredicate(*Op0, PredReg);
  dl = Op0->getDebugLoc();
  return true;
}
|
|
|
|
|
2009-06-13 17:12:55 +08:00
|
|
|
/// Given a list Ops of loads (or stores, per isLd) sharing base register
/// Base, try to move runs of them next to each other so later passes can
/// merge them; pairs that qualify are combined into LDRD/STRD right here.
/// MI2LocMap gives each instruction's position in the block for distance
/// and safety checks. Returns true if anything was changed.
bool ARMPreAllocLoadStoreOpt::RescheduleOps(MachineBasicBlock *MBB,
                                            SmallVectorImpl<MachineInstr *> &Ops,
                                            unsigned Base, bool isLd,
                                            DenseMap<MachineInstr*, unsigned> &MI2LocMap) {
  bool RetVal = false;

  // Sort by offset (in reverse order).
  llvm::sort(Ops, [](const MachineInstr *LHS, const MachineInstr *RHS) {
    int LOffset = getMemoryOpOffset(*LHS);
    int ROffset = getMemoryOpOffset(*RHS);
    assert(LHS == RHS || LOffset != ROffset);
    return LOffset > ROffset;
  });

  // The loads / stores of the same base are in order. Scan them from first to
  // last and check for the following:
  // 1. Any def of base.
  // 2. Any gaps.
  while (Ops.size() > 1) {
    unsigned FirstLoc = ~0U;
    unsigned LastLoc = 0;
    MachineInstr *FirstOp = nullptr;
    MachineInstr *LastOp = nullptr;
    int LastOffset = 0;
    unsigned LastOpcode = 0;
    unsigned LastBytes = 0;
    unsigned NumMove = 0;
    // Ops is sorted by descending offset, so iterating from the back walks
    // offsets in ascending order.
    for (int i = Ops.size() - 1; i >= 0; --i) {
      // Make sure each operation has the same kind.
      MachineInstr *Op = Ops[i];
      unsigned LSMOpcode
        = getLoadStoreMultipleOpcode(Op->getOpcode(), ARM_AM::ia);
      if (LastOpcode && LSMOpcode != LastOpcode)
        break;

      // Check that we have a continuous set of offsets.
      int Offset = getMemoryOpOffset(*Op);
      unsigned Bytes = getLSMultipleTransferSize(Op);
      if (LastBytes) {
        if (Bytes != LastBytes || Offset != (LastOffset + (int)Bytes))
          break;
      }

      // Don't try to reschedule too many instructions.
      if (NumMove == InstReorderLimit)
        break;

      // Found a mergable instruction; save information about it.
      ++NumMove;
      LastOffset = Offset;
      LastBytes = Bytes;
      LastOpcode = LSMOpcode;

      // Track the earliest and latest instruction (by block position) in
      // the run; they bound the region that must be scanned for safety.
      unsigned Loc = MI2LocMap[Op];
      if (Loc <= FirstLoc) {
        FirstLoc = Loc;
        FirstOp = Op;
      }
      if (Loc >= LastLoc) {
        LastLoc = Loc;
        LastOp = Op;
      }
    }

    if (NumMove <= 1)
      Ops.pop_back();
    else {
      SmallPtrSet<MachineInstr*, 4> MemOps;
      SmallSet<unsigned, 4> MemRegs;
      // The run occupies the tail of Ops (the NumMove smallest offsets).
      for (size_t i = Ops.size() - NumMove, e = Ops.size(); i != e; ++i) {
        MemOps.insert(Ops[i]);
        MemRegs.insert(Ops[i]->getOperand(0).getReg());
      }

      // Be conservative, if the instructions are too far apart, don't
      // move them. We want to limit the increase of register pressure.
      bool DoMove = (LastLoc - FirstLoc) <= NumMove*4; // FIXME: Tune this.
      if (DoMove)
        DoMove = IsSafeAndProfitableToMove(isLd, Base, FirstOp, LastOp,
                                           MemOps, MemRegs, TRI, AA);
      if (!DoMove) {
        // Give up on this run; drop its instructions from further
        // consideration.
        for (unsigned i = 0; i != NumMove; ++i)
          Ops.pop_back();
      } else {
        // This is the new location for the loads / stores.
        // Loads gather at the earliest instruction, stores at the latest.
        MachineBasicBlock::iterator InsertPos = isLd ? FirstOp : LastOp;
        while (InsertPos != MBB->end() &&
               (MemOps.count(&*InsertPos) || InsertPos->isDebugInstr()))
          ++InsertPos;

        // If we are moving a pair of loads / stores, see if it makes sense
        // to try to allocate a pair of registers that can form register pairs.
        MachineInstr *Op0 = Ops.back();
        MachineInstr *Op1 = Ops[Ops.size()-2];
        unsigned FirstReg = 0, SecondReg = 0;
        unsigned BaseReg = 0, PredReg = 0;
        ARMCC::CondCodes Pred = ARMCC::AL;
        bool isT2 = false;
        unsigned NewOpc = 0;
        int Offset = 0;
        DebugLoc dl;
        if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
                                             FirstReg, SecondReg, BaseReg,
                                             Offset, PredReg, Pred, isT2)) {
          Ops.pop_back();
          Ops.pop_back();

          const MCInstrDesc &MCID = TII->get(NewOpc);
          const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI, *MF);
          MRI->constrainRegClass(FirstReg, TRC);
          MRI->constrainRegClass(SecondReg, TRC);

          // Form the pair instruction.
          if (isLd) {
            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
              .addReg(FirstReg, RegState::Define)
              .addReg(SecondReg, RegState::Define)
              .addReg(BaseReg);
            // FIXME: We're converting from LDRi12 to an insn that still
            // uses addrmode2, so we need an explicit offset reg. It should
            // always by reg0 since we're transforming LDRi12s.
            if (!isT2)
              MIB.addReg(0);
            MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
            MIB.cloneMergedMemRefs({Op0, Op1});
            LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
            ++NumLDRDFormed;
          } else {
            MachineInstrBuilder MIB = BuildMI(*MBB, InsertPos, dl, MCID)
              .addReg(FirstReg)
              .addReg(SecondReg)
              .addReg(BaseReg);
            // FIXME: We're converting from LDRi12 to an insn that still
            // uses addrmode2, so we need an explicit offset reg. It should
            // always by reg0 since we're transforming STRi12s.
            if (!isT2)
              MIB.addReg(0);
            MIB.addImm(Offset).addImm(Pred).addReg(PredReg);
            MIB.cloneMergedMemRefs({Op0, Op1});
            LLVM_DEBUG(dbgs() << "Formed " << *MIB << "\n");
            ++NumSTRDFormed;
          }
          MBB->erase(Op0);
          MBB->erase(Op1);

          if (!isT2) {
            // Add register allocation hints to form register pairs.
            MRI->setRegAllocationHint(FirstReg, ARMRI::RegPairEven, SecondReg);
            MRI->setRegAllocationHint(SecondReg, ARMRI::RegPairOdd, FirstReg);
          }
        } else {
          // Couldn't pair them into LDRD/STRD; just move the whole run to
          // the insertion point so a later pass may merge them.
          for (unsigned i = 0; i != NumMove; ++i) {
            MachineInstr *Op = Ops.back();
            Ops.pop_back();
            MBB->splice(InsertPos, MBB, Op);
          }
        }

        NumLdStMoved += NumMove;
        RetVal = true;
      }
    }
  }

  return RetVal;
}
|
|
|
|
|
|
|
|
/// Scan the block, grouping unpredicated single loads and stores by their
/// base register, then invoke RescheduleOps on each group. The block is
/// processed in regions separated by barriers (calls / terminators) or by a
/// repeated base+offset, since instructions may not be moved across those.
bool
ARMPreAllocLoadStoreOpt::RescheduleLoadStoreInstrs(MachineBasicBlock *MBB) {
  bool RetVal = false;

  // Maps each (non-debug) instruction to its ordinal position in the block;
  // used by RescheduleOps to bound how far apart candidates are.
  DenseMap<MachineInstr*, unsigned> MI2LocMap;
  using MapIt = DenseMap<unsigned, SmallVector<MachineInstr *, 4>>::iterator;
  using Base2InstMap = DenseMap<unsigned, SmallVector<MachineInstr *, 4>>;
  using BaseVec = SmallVector<unsigned, 4>;
  Base2InstMap Base2LdsMap;
  Base2InstMap Base2StsMap;
  // Bases in the order first encountered, so rescheduling is deterministic.
  BaseVec LdBases;
  BaseVec StBases;

  unsigned Loc = 0;
  MachineBasicBlock::iterator MBBI = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  while (MBBI != E) {
    for (; MBBI != E; ++MBBI) {
      MachineInstr &MI = *MBBI;
      if (MI.isCall() || MI.isTerminator()) {
        // Stop at barriers.
        ++MBBI;
        break;
      }

      if (!MI.isDebugInstr())
        MI2LocMap[&MI] = ++Loc;

      // Only plain memory ops are candidates.
      if (!isMemoryOp(MI))
        continue;
      // Skip predicated loads/stores; moving them is not handled here.
      unsigned PredReg = 0;
      if (getInstrPredicate(MI, PredReg) != ARMCC::AL)
        continue;

      int Opc = MI.getOpcode();
      bool isLd = isLoadSingle(Opc);
      unsigned Base = MI.getOperand(1).getReg();
      int Offset = getMemoryOpOffset(MI);
      bool StopHere = false;
      // Record MI under its base register. If the same base+offset was
      // already seen in this region, set StopHere (captured by reference)
      // to end the region instead of recording a duplicate.
      auto FindBases = [&] (Base2InstMap &Base2Ops, BaseVec &Bases) {
        MapIt BI = Base2Ops.find(Base);
        if (BI == Base2Ops.end()) {
          Base2Ops[Base].push_back(&MI);
          Bases.push_back(Base);
          return;
        }
        for (unsigned i = 0, e = BI->second.size(); i != e; ++i) {
          if (Offset == getMemoryOpOffset(*BI->second[i])) {
            StopHere = true;
            break;
          }
        }
        if (!StopHere)
          BI->second.push_back(&MI);
      };

      if (isLd)
        FindBases(Base2LdsMap, LdBases);
      else
        FindBases(Base2StsMap, StBases);

      if (StopHere) {
        // Found a duplicate (a base+offset combination that's seen earlier).
        // Backtrack.
        --Loc;
        break;
      }
    }

    // Re-schedule loads.
    for (unsigned i = 0, e = LdBases.size(); i != e; ++i) {
      unsigned Base = LdBases[i];
      SmallVectorImpl<MachineInstr *> &Lds = Base2LdsMap[Base];
      if (Lds.size() > 1)
        RetVal |= RescheduleOps(MBB, Lds, Base, true, MI2LocMap);
    }

    // Re-schedule stores.
    for (unsigned i = 0, e = StBases.size(); i != e; ++i) {
      unsigned Base = StBases[i];
      SmallVectorImpl<MachineInstr *> &Sts = Base2StsMap[Base];
      if (Sts.size() > 1)
        RetVal |= RescheduleOps(MBB, Sts, Base, false, MI2LocMap);
    }

    // More of the block remains (we stopped at a barrier/duplicate): reset
    // the per-region state and continue scanning.
    if (MBBI != E) {
      Base2LdsMap.clear();
      Base2StsMap.clear();
      LdBases.clear();
      StBases.clear();
    }
  }

  return RetVal;
}
|
|
|
|
|
2015-06-02 05:26:23 +08:00
|
|
|
/// Returns an instance of the load / store optimization pass.
|
2009-06-13 17:12:55 +08:00
|
|
|
FunctionPass *llvm::createARMLoadStoreOptimizationPass(bool PreAlloc) {
|
|
|
|
if (PreAlloc)
|
|
|
|
return new ARMPreAllocLoadStoreOpt();
|
|
|
|
return new ARMLoadStoreOpt();
|
|
|
|
}
|