//===- AArch64InstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AArch64.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
|
2017-02-09 01:57:27 +08:00
|
|
|
#include "AArch64MachineFunctionInfo.h"
|
2016-07-27 22:31:55 +08:00
|
|
|
#include "AArch64RegisterBankInfo.h"
|
|
|
|
#include "AArch64RegisterInfo.h"
|
|
|
|
#include "AArch64Subtarget.h"
|
2016-10-11 05:50:00 +08:00
|
|
|
#include "AArch64TargetMachine.h"
|
2016-11-08 08:45:29 +08:00
|
|
|
#include "MCTargetDesc/AArch64AddressingModes.h"
|
2017-04-06 17:49:34 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
|
2017-04-20 04:48:50 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/Utils.h"
|
2016-07-27 22:31:55 +08:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2017-04-06 17:49:34 +08:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2016-07-27 22:31:55 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "aarch64-isel"
|
|
|
|
|
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class AArch64InstructionSelector : public InstructionSelector {
public:
  AArch64InstructionSelector(const AArch64TargetMachine &TM,
                             const AArch64Subtarget &STI,
                             const AArch64RegisterBankInfo &RBI);

  bool select(MachineInstr &I) const override;

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I) const;

  bool selectVaStartAAPCS(MachineInstr &I, MachineFunction &MF,
                          MachineRegisterInfo &MRI) const;
  bool selectVaStartDarwin(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  bool selectCompareBranch(MachineInstr &I, MachineFunction &MF,
                           MachineRegisterInfo &MRI) const;

  ComplexRendererFn selectArithImmed(MachineOperand &Root) const;

  ComplexRendererFn selectAddrModeUnscaled(MachineOperand &Root,
                                           unsigned Size) const;

  ComplexRendererFn selectAddrModeUnscaled8(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 1);
  }
  ComplexRendererFn selectAddrModeUnscaled16(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 2);
  }
  ComplexRendererFn selectAddrModeUnscaled32(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 4);
  }
  ComplexRendererFn selectAddrModeUnscaled64(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 8);
  }
  ComplexRendererFn selectAddrModeUnscaled128(MachineOperand &Root) const {
    return selectAddrModeUnscaled(Root, 16);
  }

  ComplexRendererFn selectAddrModeIndexed(MachineOperand &Root,
                                          unsigned Size) const;
  template <int Width>
  ComplexRendererFn selectAddrModeIndexed(MachineOperand &Root) const {
    return selectAddrModeIndexed(Root, Width / 8);
  }
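  // Note (added commentary, not in the original source): these thin wrappers
  // exist so that TableGen-imported patterns can bind a ComplexPattern
  // operand, which is keyed on access size, to a zero-argument renderer;
  // e.g. a 32-bit scaled access would go through selectAddrModeIndexed<32>,
  // which forwards Size = 32 / 8 = 4 bytes.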

  const AArch64TargetMachine &TM;
  const AArch64Subtarget &STI;
  const AArch64InstrInfo &TII;
  const AArch64RegisterInfo &TRI;
  const AArch64RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AArch64InstructionSelector::AArch64InstructionSelector(
    const AArch64TargetMachine &TM, const AArch64Subtarget &STI,
    const AArch64RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AArch64GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
static const TargetRegisterClass *
getRegClassForTypeOnBank(LLT Ty, const RegisterBank &RB,
                         const RegisterBankInfo &RBI) {
  if (RB.getID() == AArch64::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 32)
      return &AArch64::GPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::GPR64RegClass;
    return nullptr;
  }

  if (RB.getID() == AArch64::FPRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return &AArch64::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &AArch64::FPR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return &AArch64::FPR128RegClass;
    return nullptr;
  }

  return nullptr;
}

/// Check whether \p I is a currently unsupported binary operation:
/// - it has an unsized type
/// - an operand is not a vreg
/// - all operands are not in the same bank
/// These are checks that should someday live in the verifier, but right now,
/// these are mostly limitations of the aarch64 selector.
static bool unsupportedBinOp(const MachineInstr &I,
                             const AArch64RegisterBankInfo &RBI,
                             const MachineRegisterInfo &MRI,
                             const AArch64RegisterInfo &TRI) {
  LLT Ty = MRI.getType(I.getOperand(0).getReg());
  if (!Ty.isValid()) {
    DEBUG(dbgs() << "Generic binop register should be typed\n");
    return true;
  }

  const RegisterBank *PrevOpBank = nullptr;
  for (auto &MO : I.operands()) {
    // FIXME: Support non-register operands.
    if (!MO.isReg()) {
      DEBUG(dbgs() << "Generic inst non-reg operands are unsupported\n");
      return true;
    }

    // FIXME: Can generic operations have physical registers operands? If
    // so, this will need to be taught about that, and we'll need to get the
    // bank out of the minimal class for the register.
    // Either way, this needs to be documented (and possibly verified).
    if (!TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      DEBUG(dbgs() << "Generic inst has physical register operand\n");
      return true;
    }

    const RegisterBank *OpBank = RBI.getRegBank(MO.getReg(), MRI, TRI);
    if (!OpBank) {
      DEBUG(dbgs() << "Generic register has no bank or class\n");
      return true;
    }

    if (PrevOpBank && OpBank != PrevOpBank) {
      DEBUG(dbgs() << "Generic inst operands have different banks\n");
      return true;
    }
    PrevOpBank = OpBank;
  }
  return false;
}

/// Select the AArch64 opcode for the basic binary operation \p GenericOpc
/// (such as G_OR or G_SDIV), appropriate for the register bank \p RegBankID
/// and of size \p OpSize.
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectBinaryOp(unsigned GenericOpc, unsigned RegBankID,
                               unsigned OpSize) {
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    if (OpSize == 32) {
      switch (GenericOpc) {
      case TargetOpcode::G_SHL:
        return AArch64::LSLVWr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVWr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVWr;
      default:
        return GenericOpc;
      }
    } else if (OpSize == 64) {
      switch (GenericOpc) {
      case TargetOpcode::G_GEP:
        return AArch64::ADDXrr;
      case TargetOpcode::G_SHL:
        return AArch64::LSLVXr;
      case TargetOpcode::G_LSHR:
        return AArch64::LSRVXr;
      case TargetOpcode::G_ASHR:
        return AArch64::ASRVXr;
      default:
        return GenericOpc;
      }
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDSrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBSrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULSrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVSrr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_FADD:
        return AArch64::FADDDrr;
      case TargetOpcode::G_FSUB:
        return AArch64::FSUBDrr;
      case TargetOpcode::G_FMUL:
        return AArch64::FMULDrr;
      case TargetOpcode::G_FDIV:
        return AArch64::FDIVDrr;
      case TargetOpcode::G_OR:
        return AArch64::ORRv8i8;
      default:
        return GenericOpc;
      }
    }
    break;
  }
  return GenericOpc;
}
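// Illustrative usage (added commentary, not in the original source):
// selectBinaryOp(TargetOpcode::G_SHL, AArch64::GPRRegBankID, 32) yields
// AArch64::LSLVWr, while an unsupported combination returns GenericOpc
// unchanged so the caller can bail out.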

/// Select the AArch64 opcode for the G_LOAD or G_STORE operation \p GenericOpc,
/// appropriate for the (value) register bank \p RegBankID and of memory access
/// size \p OpSize. This returns the variant with the base+unsigned-immediate
/// addressing mode (e.g., LDRXui).
/// \returns \p GenericOpc if the combination is unsupported.
static unsigned selectLoadStoreUIOp(unsigned GenericOpc, unsigned RegBankID,
                                    unsigned OpSize) {
  const bool isStore = GenericOpc == TargetOpcode::G_STORE;
  switch (RegBankID) {
  case AArch64::GPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBBui : AArch64::LDRBBui;
    case 16:
      return isStore ? AArch64::STRHHui : AArch64::LDRHHui;
    case 32:
      return isStore ? AArch64::STRWui : AArch64::LDRWui;
    case 64:
      return isStore ? AArch64::STRXui : AArch64::LDRXui;
    }
    break;
  case AArch64::FPRRegBankID:
    switch (OpSize) {
    case 8:
      return isStore ? AArch64::STRBui : AArch64::LDRBui;
    case 16:
      return isStore ? AArch64::STRHui : AArch64::LDRHui;
    case 32:
      return isStore ? AArch64::STRSui : AArch64::LDRSui;
    case 64:
      return isStore ? AArch64::STRDui : AArch64::LDRDui;
    }
    break;
  }
  return GenericOpc;
}
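// Illustrative usage (added commentary, not in the original source):
// selectLoadStoreUIOp(TargetOpcode::G_LOAD, AArch64::FPRRegBankID, 32)
// returns AArch64::LDRSui, i.e. the base + unsigned-scaled-immediate form.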

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {

  unsigned DstReg = I.getOperand(0).getReg();
  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");
    return true;
  }

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  (void)SrcSize;
  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert(
      (DstSize == SrcSize ||
       // Copies are a mean to setup initial types, the number of
       // bits may not exactly match.
       (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
        DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI)) ||
       // Copies are a mean to copy bits around, as long as we are
       // on the same register class, that's fine. Otherwise, that
       // means we need some SUBREG_TO_REG or AND & co.
       (((DstSize + 31) / 32 == (SrcSize + 31) / 32) && DstSize > SrcSize)) &&
      "Copy with different width?!");
  assert((DstSize <= 64 || RegBank.getID() == AArch64::FPRRegBankID) &&
         "GPRs cannot get more than 64-bit width values");
  const TargetRegisterClass *RC = nullptr;

  if (RegBank.getID() == AArch64::FPRRegBankID) {
    if (DstSize <= 16)
      RC = &AArch64::FPR16RegClass;
    else if (DstSize <= 32)
      RC = &AArch64::FPR32RegClass;
    else if (DstSize <= 64)
      RC = &AArch64::FPR64RegClass;
    else if (DstSize <= 128)
      RC = &AArch64::FPR128RegClass;
    else {
      DEBUG(dbgs() << "Unexpected bitcast size " << DstSize << '\n');
      return false;
    }
  } else {
    assert(RegBank.getID() == AArch64::GPRRegBankID &&
           "Bitcast for the flags?");
    RC =
        DstSize <= 32 ? &AArch64::GPR32allRegClass : &AArch64::GPR64allRegClass;
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its use or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }
  I.setDesc(TII.get(AArch64::COPY));
  return true;
}

static unsigned selectFPConvOpc(unsigned GenericOpc, LLT DstTy, LLT SrcTy) {
  if (!DstTy.isScalar() || !SrcTy.isScalar())
    return GenericOpc;

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();

  switch (DstSize) {
  case 32:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXSri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXSri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUWDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUWDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  case 64:
    switch (SrcSize) {
    case 32:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUWDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUWDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXSr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXSr;
      default:
        return GenericOpc;
      }
    case 64:
      switch (GenericOpc) {
      case TargetOpcode::G_SITOFP:
        return AArch64::SCVTFUXDri;
      case TargetOpcode::G_UITOFP:
        return AArch64::UCVTFUXDri;
      case TargetOpcode::G_FPTOSI:
        return AArch64::FCVTZSUXDr;
      case TargetOpcode::G_FPTOUI:
        return AArch64::FCVTZUUXDr;
      default:
        return GenericOpc;
      }
    default:
      return GenericOpc;
    }
  default:
    return GenericOpc;
  };
  return GenericOpc;
}

static AArch64CC::CondCode changeICMPPredToAArch64CC(CmpInst::Predicate P) {
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return AArch64CC::NE;
  case CmpInst::ICMP_EQ:
    return AArch64CC::EQ;
  case CmpInst::ICMP_SGT:
    return AArch64CC::GT;
  case CmpInst::ICMP_SGE:
    return AArch64CC::GE;
  case CmpInst::ICMP_SLT:
    return AArch64CC::LT;
  case CmpInst::ICMP_SLE:
    return AArch64CC::LE;
  case CmpInst::ICMP_UGT:
    return AArch64CC::HI;
  case CmpInst::ICMP_UGE:
    return AArch64CC::HS;
  case CmpInst::ICMP_ULT:
    return AArch64CC::LO;
  case CmpInst::ICMP_ULE:
    return AArch64CC::LS;
  }
}

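// Note (added commentary, not in the original source): unlike the integer
// mapping above, some floating-point predicates need two AArch64 condition
// codes. When CondCode2 is left as AArch64CC::AL only the first check is
// needed; otherwise the user must test both (e.g. FCMP_UEQ below maps to
// EQ or VS).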
static void changeFCMPPredToAArch64CC(CmpInst::Predicate P,
                                      AArch64CC::CondCode &CondCode,
                                      AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (P) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case CmpInst::FCMP_OEQ:
    CondCode = AArch64CC::EQ;
    break;
  case CmpInst::FCMP_OGT:
    CondCode = AArch64CC::GT;
    break;
  case CmpInst::FCMP_OGE:
    CondCode = AArch64CC::GE;
    break;
  case CmpInst::FCMP_OLT:
    CondCode = AArch64CC::MI;
    break;
  case CmpInst::FCMP_OLE:
    CondCode = AArch64CC::LS;
    break;
  case CmpInst::FCMP_ONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case CmpInst::FCMP_ORD:
    CondCode = AArch64CC::VC;
    break;
  case CmpInst::FCMP_UNO:
    CondCode = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case CmpInst::FCMP_UGT:
    CondCode = AArch64CC::HI;
    break;
  case CmpInst::FCMP_UGE:
    CondCode = AArch64CC::PL;
    break;
  case CmpInst::FCMP_ULT:
    CondCode = AArch64CC::LT;
    break;
  case CmpInst::FCMP_ULE:
    CondCode = AArch64CC::LE;
    break;
  case CmpInst::FCMP_UNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

bool AArch64InstructionSelector::selectCompareBranch(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
  MachineInstr *CCMI = MRI.getVRegDef(CondReg);
  if (CCMI->getOpcode() == TargetOpcode::G_TRUNC)
    CCMI = MRI.getVRegDef(CCMI->getOperand(1).getReg());
  if (CCMI->getOpcode() != TargetOpcode::G_ICMP)
    return false;

  unsigned LHS = CCMI->getOperand(2).getReg();
  unsigned RHS = CCMI->getOperand(3).getReg();
  if (!getConstantVRegVal(RHS, MRI))
    std::swap(RHS, LHS);

  const auto RHSImm = getConstantVRegVal(RHS, MRI);
  if (!RHSImm || *RHSImm != 0)
    return false;

  const RegisterBank &RB = *RBI.getRegBank(LHS, MRI, TRI);
  if (RB.getID() != AArch64::GPRRegBankID)
    return false;

  const auto Pred = (CmpInst::Predicate)CCMI->getOperand(1).getPredicate();
  if (Pred != CmpInst::ICMP_NE && Pred != CmpInst::ICMP_EQ)
    return false;

  const unsigned CmpWidth = MRI.getType(LHS).getSizeInBits();
  unsigned CBOpc = 0;
  if (CmpWidth <= 32)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZW : AArch64::CBNZW);
  else if (CmpWidth == 64)
    CBOpc = (Pred == CmpInst::ICMP_EQ ? AArch64::CBZX : AArch64::CBNZX);
  else
    return false;

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CBOpc))
                 .addUse(LHS)
                 .addMBB(DestMBB);

  constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::selectVaStartAAPCS(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  return false;
}

bool AArch64InstructionSelector::selectVaStartDarwin(
    MachineInstr &I, MachineFunction &MF, MachineRegisterInfo &MRI) const {
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  unsigned ListReg = I.getOperand(0).getReg();

  unsigned ArgsAddrReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::ADDXri))
          .addDef(ArgsAddrReg)
          .addFrameIndex(FuncInfo->getVarArgsStackIndex())
          .addImm(0)
          .addImm(0);

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);

  MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AArch64::STRXui))
            .addUse(ArgsAddrReg)
            .addUse(ListReg)
            .addImm(0)
            .addMemOperand(*I.memoperands_begin());

  constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

bool AArch64InstructionSelector::select(MachineInstr &I) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  // G_PHI requires same handling as PHI
  if (!isPreISelGenericOpcode(Opcode) || Opcode == TargetOpcode::G_PHI) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    if (Opcode == TargetOpcode::PHI || Opcode == TargetOpcode::G_PHI) {
      const unsigned DefReg = I.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);

      const TargetRegisterClass *DefRC = nullptr;
      if (TargetRegisterInfo::isPhysicalRegister(DefReg)) {
        DefRC = TRI.getRegClass(DefReg);
      } else {
        const RegClassOrRegBank &RegClassOrBank =
            MRI.getRegClassOrRegBank(DefReg);

        DefRC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
        if (!DefRC) {
          if (!DefTy.isValid()) {
            DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
            return false;
          }
          const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
          DefRC = getRegClassForTypeOnBank(DefTy, RB, RBI);
          if (!DefRC) {
            DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
            return false;
          }
        }
      }
      I.setDesc(TII.get(TargetOpcode::PHI));

      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }

    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    DEBUG(dbgs() << "Generic instruction has unexpected implicit operands\n");
    return false;
  }

  if (selectImpl(I))
    return true;

  LLT Ty =
      I.getOperand(0).isReg() ? MRI.getType(I.getOperand(0).getReg()) : LLT{};

  switch (Opcode) {
  case TargetOpcode::G_BRCOND: {
    if (Ty.getSizeInBits() > 32) {
      // We shouldn't need this on AArch64, but it would be implemented as an
      // EXTRACT_SUBREG followed by a TBNZW because TBNZX has no encoding if the
      // bit being tested is < 32.
      DEBUG(dbgs() << "G_BRCOND has type: " << Ty
                   << ", expected at most 32-bits");
      return false;
    }

    const unsigned CondReg = I.getOperand(0).getReg();
    MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

    if (selectCompareBranch(I, MF, MRI))
      return true;

    auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::TBNZW))
                   .addUse(CondReg)
                   .addImm(/*bit offset=*/0)
                   .addMBB(DestMBB);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB.getInstr(), TII, TRI, RBI);
  }

  case TargetOpcode::G_BRINDIRECT: {
    I.setDesc(TII.get(AArch64::BR));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_FCONSTANT:
  case TargetOpcode::G_CONSTANT: {
    const bool isFP = Opcode == TargetOpcode::G_FCONSTANT;

    const LLT s32 = LLT::scalar(32);
    const LLT s64 = LLT::scalar(64);
    const LLT p0 = LLT::pointer(0, 64);

    const unsigned DefReg = I.getOperand(0).getReg();
    const LLT DefTy = MRI.getType(DefReg);
    const unsigned DefSize = DefTy.getSizeInBits();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    // FIXME: Redundant check, but even less readable when factored out.
    if (isFP) {
      if (Ty != s32 && Ty != s64) {
        DEBUG(dbgs() << "Unable to materialize FP " << Ty
                     << " constant, expected: " << s32 << " or " << s64
                     << '\n');
        return false;
      }

      if (RB.getID() != AArch64::FPRRegBankID) {
        DEBUG(dbgs() << "Unable to materialize FP " << Ty
                     << " constant on bank: " << RB << ", expected: FPR\n");
        return false;
      }

      // The case when we have 0.0 is covered by tablegen. Reject it here so we
      // can be sure tablegen works correctly and isn't rescued by this code.
      if (I.getOperand(1).getFPImm()->getValueAPF().isExactlyValue(0.0))
        return false;
    } else {
      // s32 and s64 are covered by tablegen.
      if (Ty != p0) {
        DEBUG(dbgs() << "Unable to materialize integer " << Ty
                     << " constant, expected: " << s32 << ", " << s64 << ", or "
                     << p0 << '\n');
        return false;
      }

      if (RB.getID() != AArch64::GPRRegBankID) {
        DEBUG(dbgs() << "Unable to materialize integer " << Ty
                     << " constant on bank: " << RB << ", expected: GPR\n");
        return false;
      }
    }

    const unsigned MovOpc =
        DefSize == 32 ? AArch64::MOVi32imm : AArch64::MOVi64imm;

    I.setDesc(TII.get(MovOpc));

    if (isFP) {
      const TargetRegisterClass &GPRRC =
          DefSize == 32 ? AArch64::GPR32RegClass : AArch64::GPR64RegClass;
      const TargetRegisterClass &FPRRC =
          DefSize == 32 ? AArch64::FPR32RegClass : AArch64::FPR64RegClass;

      const unsigned DefGPRReg = MRI.createVirtualRegister(&GPRRC);
      MachineOperand &RegOp = I.getOperand(0);
      RegOp.setReg(DefGPRReg);

      BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
              TII.get(AArch64::COPY))
          .addDef(DefReg)
          .addUse(DefGPRReg);

      if (!RBI.constrainGenericRegister(DefReg, FPRRC, MRI)) {
        DEBUG(dbgs() << "Failed to constrain G_FCONSTANT def operand\n");
        return false;
      }

      MachineOperand &ImmOp = I.getOperand(1);
      // FIXME: Is going through int64_t always correct?
      ImmOp.ChangeToImmediate(
          ImmOp.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
    } else if (I.getOperand(1).isCImm()) {
      uint64_t Val = I.getOperand(1).getCImm()->getZExtValue();
      I.getOperand(1).ChangeToImmediate(Val);
    } else if (I.getOperand(1).isImm()) {
      uint64_t Val = I.getOperand(1).getImm();
      I.getOperand(1).ChangeToImmediate(Val);
    }

    constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    return true;
  }
  case TargetOpcode::G_EXTRACT: {
    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    // Larger extracts are vectors, same-size extracts should be something else
    // by now (either split up or simplified to a COPY).
    if (SrcTy.getSizeInBits() > 64 || Ty.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(AArch64::UBFMXri));
    MachineInstrBuilder(MF, I).addImm(I.getOperand(2).getImm() +
                                      Ty.getSizeInBits() - 1);

    unsigned DstReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, std::next(I.getIterator()), I.getDebugLoc(),
            TII.get(AArch64::COPY))
        .addDef(I.getOperand(0).getReg())
        .addUse(DstReg, 0, AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(0).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(0).setReg(DstReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_INSERT: {
    LLT SrcTy = MRI.getType(I.getOperand(2).getReg());
    // Larger inserts are vectors, same-size ones should be something else by
    // now (split up or turned into COPYs).
    if (Ty.getSizeInBits() > 64 || SrcTy.getSizeInBits() > 32)
      return false;

    I.setDesc(TII.get(AArch64::BFMXri));
    unsigned LSB = I.getOperand(3).getImm();
    unsigned Width = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
    I.getOperand(3).setImm((64 - LSB) % 64);
    MachineInstrBuilder(MF, I).addImm(Width - 1);

    unsigned SrcReg = MRI.createGenericVirtualRegister(LLT::scalar(64));
    BuildMI(MBB, I.getIterator(), I.getDebugLoc(),
            TII.get(AArch64::SUBREG_TO_REG))
        .addDef(SrcReg)
        .addImm(0)
        .addUse(I.getOperand(2).getReg())
        .addImm(AArch64::sub_32);
    RBI.constrainGenericRegister(I.getOperand(2).getReg(),
                                 AArch64::GPR32RegClass, MRI);
    I.getOperand(2).setReg(SrcReg);

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FRAME_INDEX: {
    // allocas and G_FRAME_INDEX are only supported in addrspace(0).
    if (Ty != LLT::pointer(0, 64)) {
      DEBUG(dbgs() << "G_FRAME_INDEX pointer has type: " << Ty
                   << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }
    I.setDesc(TII.get(AArch64::ADDXri));

    // MOs for a #0 shifted immediate.
    I.addOperand(MachineOperand::CreateImm(0));
    I.addOperand(MachineOperand::CreateImm(0));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_GLOBAL_VALUE: {
    auto GV = I.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // FIXME: we don't support TLS yet.
      return false;
    }
    unsigned char OpFlags = STI.ClassifyGlobalReference(GV, TM);
    if (OpFlags & AArch64II::MO_GOT) {
      I.setDesc(TII.get(AArch64::LOADgot));
      I.getOperand(1).setTargetFlags(OpFlags);
    } else {
      I.setDesc(TII.get(AArch64::MOVaddr));
      I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
      MachineInstrBuilder MIB(MF, I);
      MIB.addGlobalAddress(GV, I.getOperand(1).getOffset(),
                           OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    LLT MemTy = Ty;
    LLT PtrTy = MRI.getType(I.getOperand(1).getReg());

    if (PtrTy != LLT::pointer(0, 64)) {
      DEBUG(dbgs() << "Load/Store pointer has type: " << PtrTy
                   << ", expected: " << LLT::pointer(0, 64) << '\n');
      return false;
    }

    auto &MemOp = **I.memoperands_begin();
    if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
      DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    const unsigned PtrReg = I.getOperand(1).getReg();
#ifndef NDEBUG
    const RegisterBank &PtrRB = *RBI.getRegBank(PtrReg, MRI, TRI);
    // Sanity-check the pointer register.
    assert(PtrRB.getID() == AArch64::GPRRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(MRI.getType(PtrReg).isPointer() &&
           "Load/Store pointer operand isn't a pointer");
#endif

    const unsigned ValReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(ValReg, MRI, TRI);

    const unsigned NewOpc =
        selectLoadStoreUIOp(I.getOpcode(), RB.getID(), MemTy.getSizeInBits());
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));

    uint64_t Offset = 0;
    auto *PtrMI = MRI.getVRegDef(PtrReg);

    // Try to fold a GEP into our unsigned immediate addressing mode.
    if (PtrMI->getOpcode() == TargetOpcode::G_GEP) {
      if (auto COff = getConstantVRegVal(PtrMI->getOperand(2).getReg(), MRI)) {
        int64_t Imm = *COff;
        const unsigned Size = MemTy.getSizeInBits() / 8;
        const unsigned Scale = Log2_32(Size);
        if ((Imm & (Size - 1)) == 0 && Imm >= 0 && Imm < (0x1000 << Scale)) {
          unsigned Ptr2Reg = PtrMI->getOperand(1).getReg();
          I.getOperand(1).setReg(Ptr2Reg);
          PtrMI = MRI.getVRegDef(Ptr2Reg);
          Offset = Imm / Size;
        }
      }
    }
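    // Illustrative example (added commentary, not in the original source):
    // for a 64-bit access Size is 8 and Scale is 3, so a G_GEP constant
    // offset of 0, 8, ..., 32760 is folded here and becomes the scaled
    // immediate Offset = Imm / 8 on the selected LDRXui/STRXui.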

    // If we haven't folded anything into our addressing mode yet, try to fold
    // a frame index into the base+offset.
    if (!Offset && PtrMI->getOpcode() == TargetOpcode::G_FRAME_INDEX)
      I.getOperand(1).ChangeToFrameIndex(PtrMI->getOperand(1).getIndex());

    I.addOperand(MachineOperand::CreateImm(Offset));

    // If we're storing a 0, use WZR/XZR.
    if (auto CVal = getConstantVRegVal(ValReg, MRI)) {
      if (*CVal == 0 && Opcode == TargetOpcode::G_STORE) {
        if (I.getOpcode() == AArch64::STRWui)
          I.getOperand(0).setReg(AArch64::WZR);
        else if (I.getOpcode() == AArch64::STRXui)
          I.getOperand(0).setReg(AArch64::XZR);
      }
    }

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_SMULH:
  case TargetOpcode::G_UMULH: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_[SU]MULH on bank: " << RB << ", expected: GPR\n");
      return false;
    }

    if (Ty != LLT::scalar(64)) {
      DEBUG(dbgs() << "G_[SU]MULH has type: " << Ty
                   << ", expected: " << LLT::scalar(64) << '\n');
      return false;
    }

    unsigned NewOpc = I.getOpcode() == TargetOpcode::G_SMULH ? AArch64::SMULHrr
                                                             : AArch64::UMULHrr;
    I.setDesc(TII.get(NewOpc));

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:

  case TargetOpcode::G_OR:
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_GEP: {
    // Reject the various things we don't support yet.
    if (unsupportedBinOp(I, RBI, MRI, TRI))
      return false;

    const unsigned OpSize = Ty.getSizeInBits();

    const unsigned DefReg = I.getOperand(0).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    const unsigned NewOpc = selectBinaryOp(I.getOpcode(), RB.getID(), OpSize);
    if (NewOpc == I.getOpcode())
      return false;

    I.setDesc(TII.get(NewOpc));
    // FIXME: Should the type be always reset in setDesc?

    // Now that we selected an opcode, we need to constrain the register
    // operands to use appropriate classes.
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  case TargetOpcode::G_PTR_MASK: {
    uint64_t Align = I.getOperand(2).getImm();
    if (Align >= 64 || Align == 0)
      return false;

    uint64_t Mask = ~((1ULL << Align) - 1);
    I.setDesc(TII.get(AArch64::ANDXri));
    I.getOperand(2).setImm(AArch64_AM::encodeLogicalImmediate(Mask, 64));

    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }
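  // Illustrative example (added commentary, not in the original source): for
  // an alignment operand of 3 the mask is ~0b111, i.e. 0xFFFFFFFFFFFFFFF8,
  // which clears the low three bits of the pointer via ANDXri.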
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg());
    const LLT SrcTy = MRI.getType(I.getOperand(1).getReg());

    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

    if (DstRB.getID() != SrcRB.getID()) {
      DEBUG(dbgs() << "G_TRUNC/G_PTRTOINT input/output on different banks\n");
      return false;
    }

    if (DstRB.getID() == AArch64::GPRRegBankID) {
      const TargetRegisterClass *DstRC =
          getRegClassForTypeOnBank(DstTy, DstRB, RBI);
      if (!DstRC)
        return false;

      const TargetRegisterClass *SrcRC =
          getRegClassForTypeOnBank(SrcTy, SrcRB, RBI);
      if (!SrcRC)
        return false;

      if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
          !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
        DEBUG(dbgs() << "Failed to constrain G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      if (DstRC == SrcRC) {
        // Nothing to be done
      } else if (Opcode == TargetOpcode::G_TRUNC && DstTy == LLT::scalar(32) &&
                 SrcTy == LLT::scalar(64)) {
        llvm_unreachable("TableGen can import this case");
        return false;
      } else if (DstRC == &AArch64::GPR32RegClass &&
                 SrcRC == &AArch64::GPR64RegClass) {
        I.getOperand(1).setSubReg(AArch64::sub_32);
      } else {
        DEBUG(dbgs() << "Unhandled mismatched classes in G_TRUNC/G_PTRTOINT\n");
        return false;
      }

      I.setDesc(TII.get(TargetOpcode::COPY));
      return true;
    } else if (DstRB.getID() == AArch64::FPRRegBankID) {
      if (DstTy == LLT::vector(4, 16) && SrcTy == LLT::vector(4, 32)) {
        I.setDesc(TII.get(AArch64::XTNv4i16));
        constrainSelectedInstRegOperands(I, TII, TRI, RBI);
        return true;
      }
    }

    return false;
  }

  case TargetOpcode::G_ANYEXT: {
    const unsigned DstReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();

    const RegisterBank &RBDst = *RBI.getRegBank(DstReg, MRI, TRI);
    if (RBDst.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBDst << ", expected: GPR\n");
      return false;
    }

    const RegisterBank &RBSrc = *RBI.getRegBank(SrcReg, MRI, TRI);
    if (RBSrc.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << "G_ANYEXT on bank: " << RBSrc << ", expected: GPR\n");
      return false;
    }

    const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();

    if (DstSize == 0) {
      DEBUG(dbgs() << "G_ANYEXT operand has no size, not a gvreg?\n");
      return false;
    }

    if (DstSize != 64 && DstSize > 32) {
      DEBUG(dbgs() << "G_ANYEXT to size: " << DstSize
                   << ", expected: 32 or 64\n");
      return false;
    }
    // At this point G_ANYEXT is just like a plain COPY, but we need
    // to explicitly form the 64-bit value if any.
    if (DstSize > 32) {
      unsigned ExtSrc = MRI.createVirtualRegister(&AArch64::GPR64allRegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(ExtSrc)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);
      I.getOperand(1).setReg(ExtSrc);
    }
    return selectCopy(I, TII, MRI, TRI, RBI);
  }

  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    unsigned Opcode = I.getOpcode();
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const bool isSigned = Opcode == TargetOpcode::G_SEXT;
    const unsigned DefReg = I.getOperand(0).getReg();
    const unsigned SrcReg = I.getOperand(1).getReg();
    const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

    if (RB.getID() != AArch64::GPRRegBankID) {
      DEBUG(dbgs() << TII.getName(I.getOpcode()) << " on bank: " << RB
                   << ", expected: GPR\n");
      return false;
    }

    MachineInstr *ExtI;
    if (DstTy == LLT::scalar(64)) {
      // FIXME: Can we avoid manually doing this?
      if (!RBI.constrainGenericRegister(SrcReg, AArch64::GPR32RegClass, MRI)) {
        DEBUG(dbgs() << "Failed to constrain " << TII.getName(Opcode)
                     << " operand\n");
        return false;
      }

      const unsigned SrcXReg =
          MRI.createVirtualRegister(&AArch64::GPR64RegClass);
      BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::SUBREG_TO_REG))
          .addDef(SrcXReg)
          .addImm(0)
          .addUse(SrcReg)
          .addImm(AArch64::sub_32);

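      // SBFM/UBFM with immr = 0 and imms = SrcSize - 1 copy the low SrcSize
      // bits and sign- or zero-extend them, which is exactly G_SEXT/G_ZEXT.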
      const unsigned NewOpc = isSigned ? AArch64::SBFMXri : AArch64::UBFMXri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcXReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else if (DstTy.isScalar() && DstTy.getSizeInBits() <= 32) {
      const unsigned NewOpc = isSigned ? AArch64::SBFMWri : AArch64::UBFMWri;
      ExtI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
                 .addDef(DefReg)
                 .addUse(SrcReg)
                 .addImm(0)
                 .addImm(SrcTy.getSizeInBits() - 1);
    } else {
      return false;
    }

    constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_SITOFP:
  case TargetOpcode::G_UITOFP:
  case TargetOpcode::G_FPTOSI:
  case TargetOpcode::G_FPTOUI: {
    const LLT DstTy = MRI.getType(I.getOperand(0).getReg()),
              SrcTy = MRI.getType(I.getOperand(1).getReg());
    const unsigned NewOpc = selectFPConvOpc(Opcode, DstTy, SrcTy);
    if (NewOpc == Opcode)
      return false;

    I.setDesc(TII.get(NewOpc));
    constrainSelectedInstRegOperands(I, TII, TRI, RBI);

    return true;
  }

  case TargetOpcode::G_INTTOPTR:
    // The importer is currently unable to import pointer types since they
    // didn't exist in SelectionDAG.
    return selectCopy(I, TII, MRI, TRI, RBI);

  case TargetOpcode::G_BITCAST:
    // Imported SelectionDAG rules can handle every bitcast except those that
    // bitcast from a type to the same type. Ideally, these shouldn't occur
    // but we might not run an optimizer that deletes them.
    if (MRI.getType(I.getOperand(0).getReg()) ==
        MRI.getType(I.getOperand(1).getReg()))
      return selectCopy(I, TII, MRI, TRI, RBI);
    return false;

  case TargetOpcode::G_SELECT: {
    if (MRI.getType(I.getOperand(1).getReg()) != LLT::scalar(1)) {
      DEBUG(dbgs() << "G_SELECT cond has type: " << Ty
                   << ", expected: " << LLT::scalar(1) << '\n');
      return false;
    }

    const unsigned CondReg = I.getOperand(1).getReg();
    const unsigned TReg = I.getOperand(2).getReg();
    const unsigned FReg = I.getOperand(3).getReg();

    unsigned CSelOpc = 0;

    if (Ty == LLT::scalar(32)) {
      CSelOpc = AArch64::CSELWr;
    } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
      CSelOpc = AArch64::CSELXr;
    } else {
      return false;
    }

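    // Test bit 0 of the condition: ANDS against the logical immediate 1 sets
    // NZCV, and the CSEL below picks TReg when the result is non-zero (NE)
    // and FReg otherwise.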
    MachineInstr &TstMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ANDSWri))
             .addDef(AArch64::WZR)
             .addUse(CondReg)
             .addImm(AArch64_AM::encodeLogicalImmediate(1, 32));

    MachineInstr &CSelMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CSelOpc))
                                .addDef(I.getOperand(0).getReg())
                                .addUse(TReg)
                                .addUse(FReg)
                                .addImm(AArch64CC::NE);

    constrainSelectedInstRegOperands(TstMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSelMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_ICMP: {
    if (Ty != LLT::scalar(32)) {
      DEBUG(dbgs() << "G_ICMP result has type: " << Ty
                   << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    unsigned ZReg = 0;

    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::SUBSWrr;
      ZReg = AArch64::WZR;
    } else if (CmpTy == LLT::scalar(64) || CmpTy.isPointer()) {
      CmpOpc = AArch64::SUBSXrr;
      ZReg = AArch64::XZR;
    } else {
      return false;
    }

    // CSINC increments the result by one when the condition code is false.
    // Therefore, we have to invert the predicate to get an increment by 1 when
    // the predicate is true.
    const AArch64CC::CondCode invCC =
        changeICMPPredToAArch64CC(CmpInst::getInversePredicate(
            (CmpInst::Predicate)I.getOperand(1).getPredicate()));

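    // The SUBS is emitted purely for its NZCV side effect; the arithmetic
    // result is discarded into WZR/XZR. The CSINC from WZR/WZR below then
    // materializes the boolean result as 0 or 1.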
    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addDef(ZReg)
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(I.getOperand(0).getReg())
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(invCC);

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_FCMP: {
    if (Ty != LLT::scalar(32)) {
      DEBUG(dbgs() << "G_FCMP result has type: " << Ty
                   << ", expected: " << LLT::scalar(32) << '\n');
      return false;
    }

    unsigned CmpOpc = 0;
    LLT CmpTy = MRI.getType(I.getOperand(2).getReg());
    if (CmpTy == LLT::scalar(32)) {
      CmpOpc = AArch64::FCMPSrr;
    } else if (CmpTy == LLT::scalar(64)) {
      CmpOpc = AArch64::FCMPDrr;
    } else {
      return false;
    }

    // FIXME: regbank

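    // Some FP predicates need two AArch64 condition codes. In that case
    // changeFCMPPredToAArch64CC returns a second code in CC2 (otherwise CC2
    // is AL) and the two CSINC results are ORed together below.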
    AArch64CC::CondCode CC1, CC2;
    changeFCMPPredToAArch64CC(
        (CmpInst::Predicate)I.getOperand(1).getPredicate(), CC1, CC2);

    MachineInstr &CmpMI = *BuildMI(MBB, I, I.getDebugLoc(), TII.get(CmpOpc))
                               .addUse(I.getOperand(2).getReg())
                               .addUse(I.getOperand(3).getReg());

    const unsigned DefReg = I.getOperand(0).getReg();
    unsigned Def1Reg = DefReg;
    if (CC2 != AArch64CC::AL)
      Def1Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);

    MachineInstr &CSetMI =
        *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
             .addDef(Def1Reg)
             .addUse(AArch64::WZR)
             .addUse(AArch64::WZR)
             .addImm(getInvertedCondCode(CC1));

    if (CC2 != AArch64CC::AL) {
      unsigned Def2Reg = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
      MachineInstr &CSet2MI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::CSINCWr))
               .addDef(Def2Reg)
               .addUse(AArch64::WZR)
               .addUse(AArch64::WZR)
               .addImm(getInvertedCondCode(CC2));
      MachineInstr &OrMI =
          *BuildMI(MBB, I, I.getDebugLoc(), TII.get(AArch64::ORRWrr))
               .addDef(DefReg)
               .addUse(Def1Reg)
               .addUse(Def2Reg);
      constrainSelectedInstRegOperands(OrMI, TII, TRI, RBI);
      constrainSelectedInstRegOperands(CSet2MI, TII, TRI, RBI);
    }

    constrainSelectedInstRegOperands(CmpMI, TII, TRI, RBI);
    constrainSelectedInstRegOperands(CSetMI, TII, TRI, RBI);

    I.eraseFromParent();
    return true;
  }

  case TargetOpcode::G_VASTART:
    return STI.isTargetDarwin() ? selectVaStartDarwin(I, MF, MRI)
                                : selectVaStartAAPCS(I, MF, MRI);
  case TargetOpcode::G_IMPLICIT_DEF:
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

/// SelectArithImmed - Select an immediate value that can be represented as
/// a 12-bit value shifted left by either 0 or 12. If so, return the renderer
/// functions for the 12-bit value and the shifter operand; otherwise return
/// None.
InstructionSelector::ComplexRendererFn
AArch64InstructionSelector::selectArithImmed(MachineOperand &Root) const {
  MachineInstr &MI = *Root.getParent();
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // This function is called from the addsub_shifted_imm ComplexPattern,
  // which lists [imm] as the list of opcode it's interested in, however
  // we still need to check whether the operand is actually an immediate
  // here because the ComplexPattern opcode list is only used in
  // root-level opcode matching.
  uint64_t Immed;
  if (Root.isImm())
    Immed = Root.getImm();
  else if (Root.isCImm())
    Immed = Root.getCImm()->getZExtValue();
  else if (Root.isReg()) {
    MachineInstr *Def = MRI.getVRegDef(Root.getReg());
    if (Def->getOpcode() != TargetOpcode::G_CONSTANT)
      return None;
    MachineOperand &Op1 = Def->getOperand(1);
    if (!Op1.isCImm() || Op1.getCImm()->getBitWidth() > 64)
      return None;
    Immed = Op1.getCImm()->getZExtValue();
  } else
    return None;

  unsigned ShiftAmt;

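  // AArch64 add/sub immediates are 12 bits wide, optionally shifted left by
  // 12. Accept the value as-is if it fits in 12 bits, or drop the low 12
  // zero bits and use an LSL #12 shifter if it fits in bits [23:12].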
  if (Immed >> 12 == 0) {
    ShiftAmt = 0;
  } else if ((Immed & 0xfff) == 0 && Immed >> 24 == 0) {
    ShiftAmt = 12;
    Immed = Immed >> 12;
  } else
    return None;

  unsigned ShVal = AArch64_AM::getShifterImm(AArch64_AM::LSL, ShiftAmt);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Immed); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ShVal); },
  }};
}

/// Select a "register plus unscaled signed 9-bit immediate" address. This
|
|
|
|
/// should only match when there is an offset that is not valid for a scaled
|
|
|
|
/// immediate addressing mode. The "Size" argument is the size in bytes of the
|
|
|
|
/// memory reference, which is needed here to know what is valid for a scaled
|
|
|
|
/// immediate.
|
|
|
|
InstructionSelector::ComplexRendererFn
|
|
|
|
AArch64InstructionSelector::selectAddrModeUnscaled(MachineOperand &Root,
|
|
|
|
unsigned Size) const {
|
|
|
|
MachineRegisterInfo &MRI =
|
|
|
|
Root.getParent()->getParent()->getParent()->getRegInfo();
|
|
|
|
|
|
|
|
if (!Root.isReg())
|
|
|
|
return None;
|
|
|
|
|
|
|
|
if (!isBaseWithConstantOffset(Root, MRI))
|
|
|
|
return None;
|
|
|
|
|
|
|
|
MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
|
|
|
|
if (!RootDef)
|
|
|
|
return None;
|
|
|
|
|
|
|
|
MachineOperand &OffImm = RootDef->getOperand(2);
|
|
|
|
if (!OffImm.isReg())
|
|
|
|
return None;
|
|
|
|
MachineInstr *RHS = MRI.getVRegDef(OffImm.getReg());
|
|
|
|
if (!RHS || RHS->getOpcode() != TargetOpcode::G_CONSTANT)
|
|
|
|
return None;
|
|
|
|
int64_t RHSC;
|
|
|
|
MachineOperand &RHSOp1 = RHS->getOperand(1);
|
|
|
|
if (!RHSOp1.isCImm() || RHSOp1.getCImm()->getBitWidth() > 64)
|
|
|
|
return None;
|
|
|
|
RHSC = RHSOp1.getCImm()->getSExtValue();
|
|
|
|
|
|
|
|
// If the offset is valid as a scaled immediate, don't match here.
|
|
|
|
if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Log2_32(Size)))
|
|
|
|
return None;
|
|
|
|
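  // The unscaled forms (LDUR/STUR and friends) take a signed 9-bit byte
  // offset, i.e. anything in [-256, 255].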
  if (RHSC >= -256 && RHSC < 256) {
    MachineOperand &Base = RootDef->getOperand(1);
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(Base); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
    }};
  }
  return None;
}

/// Select a "register plus scaled unsigned 12-bit immediate" address. The
|
|
|
|
/// "Size" argument is the size in bytes of the memory reference, which
|
|
|
|
/// determines the scale.
|
|
|
|
InstructionSelector::ComplexRendererFn
|
|
|
|
AArch64InstructionSelector::selectAddrModeIndexed(MachineOperand &Root,
|
|
|
|
unsigned Size) const {
|
|
|
|
MachineRegisterInfo &MRI =
|
|
|
|
Root.getParent()->getParent()->getParent()->getRegInfo();
|
|
|
|
|
|
|
|
if (!Root.isReg())
|
|
|
|
return None;
|
|
|
|
|
|
|
|
MachineInstr *RootDef = MRI.getVRegDef(Root.getReg());
|
|
|
|
if (!RootDef)
|
|
|
|
return None;
|
|
|
|
|
|
|
|
if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
|
|
|
|
return {{
|
|
|
|
[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
|
|
|
|
[=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
|
|
|
|
}};
|
|
|
|
}
|
|
|
|
|
|
|
|
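  // The scaled form encodes an unsigned 12-bit immediate that the hardware
  // multiplies by the access size, so the byte offset must be a multiple of
  // Size and below 0x1000 << Log2_32(Size); the rendered immediate is the
  // byte offset divided by Size.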
  if (isBaseWithConstantOffset(Root, MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI.getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI.getVRegDef(RHS.getReg());
    if (LHSDef && RHSDef) {
      int64_t RHSC = (int64_t)RHSDef->getOperand(1).getCImm()->getZExtValue();
      unsigned Scale = Log2_32(Size);
      if ((RHSC & (Size - 1)) == 0 && RHSC >= 0 && RHSC < (0x1000 << Scale)) {
        if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
          LHSDef = MRI.getVRegDef(LHSDef->getOperand(1).getReg());
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC >> Scale); },
        }};
      }
    }
  }

  // Before falling back to our general case, check if the unscaled
  // instructions can handle this. If so, that's preferable.
  if (selectAddrModeUnscaled(Root, Size).hasValue())
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
  }};
}

namespace llvm {
InstructionSelector *
createAArch64InstructionSelector(const AArch64TargetMachine &TM,
                                 AArch64Subtarget &Subtarget,
                                 AArch64RegisterBankInfo &RBI) {
  return new AArch64InstructionSelector(TM, Subtarget, RBI);
}
}