//===- llvm/CodeGen/GlobalISel/InstructionSelector.cpp --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the InstructionSelector class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
|
2016-12-23 05:56:19 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/Utils.h"
|
2017-06-27 06:44:03 +08:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2016-07-27 22:31:55 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
2017-06-27 06:44:03 +08:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2017-10-16 11:36:29 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2017-06-27 06:44:03 +08:00
|
|
|
#include "llvm/MC/MCInstrDesc.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
2016-07-27 22:31:55 +08:00
|
|
|
#include "llvm/Target/TargetRegisterInfo.h"
|
2017-06-27 06:44:03 +08:00
|
|
|
#include <cassert>
|
2016-07-27 22:31:55 +08:00
|
|
|
|
|
|
|
#define DEBUG_TYPE "instructionselector"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
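// Temporary state used while the TableGen-erated matcher runs: the
// instructions matched so far and the operand renderers selected for them.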
InstructionSelector::MatcherState::MatcherState(unsigned MaxRenderers)
    : Renderers(MaxRenderers), MIs() {}

InstructionSelector::InstructionSelector() = default;

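// Constrain the register operand at OpIdx of I to the register class RC,
// returning true on success.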
bool InstructionSelector::constrainOperandRegToRegClass(
    MachineInstr &I, unsigned OpIdx, const TargetRegisterClass &RC,
    const TargetInstrInfo &TII, const TargetRegisterInfo &TRI,
    const RegisterBankInfo &RBI) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  return
      constrainRegToClass(MRI, TII, RBI, I, I.getOperand(OpIdx).getReg(), RC);
}

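// Constrain every explicit virtual register operand of the selected
// instruction I to the class required by its MCInstrDesc, and tie uses to
// defs where the descriptor demands it.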
bool InstructionSelector::constrainSelectedInstRegOperands(
    MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI,
    const RegisterBankInfo &RBI) const {
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    unsigned Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (TRI.isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       Reg, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

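// Return true if MO is a register operand defined by a G_CONSTANT with the
// given value.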
bool InstructionSelector::isOperandImmEqual(
    const MachineOperand &MO, int64_t Value,
    const MachineRegisterInfo &MRI) const {
  if (MO.isReg() && MO.getReg())
    if (auto VRegVal = getConstantVRegVal(MO.getReg(), MRI))
      return *VRegVal == Value;
  return false;
}

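// Return true if Root is defined by a G_GEP whose offset operand is a
// G_CONSTANT, i.e. the address is a base register plus a constant offset.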
bool InstructionSelector::isBaseWithConstantOffset(
    const MachineOperand &Root, const MachineRegisterInfo &MRI) const {
  if (!Root.isReg())
    return false;

  MachineInstr *RootI = MRI.getVRegDef(Root.getReg());
  if (RootI->getOpcode() != TargetOpcode::G_GEP)
    return false;

  MachineOperand &RHS = RootI->getOperand(2);
  MachineInstr *RHSI = MRI.getVRegDef(RHS.getReg());
  if (RHSI->getOpcode() != TargetOpcode::G_CONSTANT)
    return false;

  return true;
}

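// Cheap, conservative legality check for folding MI into a nested match:
// reject anything that may load or store, has unmodelled side-effects, or
// has implicit operands.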
bool InstructionSelector::isObviouslySafeToFold(MachineInstr &MI) const {
  return !MI.mayLoadOrStore() && !MI.hasUnmodeledSideEffects() &&
         MI.implicit_operands().begin() == MI.implicit_operands().end();
}