2017-02-22 20:25:09 +08:00
|
|
|
//===- X86InstructionSelector.cpp ----------------------------*- C++ -*-==//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
/// \file
|
|
|
|
/// This file implements the targeting of the InstructionSelector class for
|
|
|
|
/// X86.
|
|
|
|
/// \todo This should be generated by TableGen.
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
2017-03-23 23:25:57 +08:00
|
|
|
#include "X86InstrBuilder.h"
|
2017-02-22 20:25:09 +08:00
|
|
|
#include "X86InstrInfo.h"
|
|
|
|
#include "X86RegisterBankInfo.h"
|
|
|
|
#include "X86RegisterInfo.h"
|
|
|
|
#include "X86Subtarget.h"
|
|
|
|
#include "X86TargetMachine.h"
|
2017-04-12 20:54:54 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
|
2017-05-17 20:48:08 +08:00
|
|
|
#include "llvm/CodeGen/GlobalISel/Utils.h"
|
2017-02-22 20:25:09 +08:00
|
|
|
#include "llvm/CodeGen/MachineBasicBlock.h"
|
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstr.h"
|
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2017-04-06 17:49:34 +08:00
|
|
|
#include "llvm/CodeGen/MachineOperand.h"
|
2017-02-22 20:25:09 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
|
|
|
#include "llvm/IR/Type.h"
|
|
|
|
#include "llvm/Support/Debug.h"
|
|
|
|
#include "llvm/Support/raw_ostream.h"
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "X86-isel"
|
|
|
|
|
|
|
|
using namespace llvm;
|
|
|
|
|
|
|
|
#ifndef LLVM_BUILD_GLOBAL_ISEL
|
|
|
|
#error "You shouldn't build this"
|
|
|
|
#endif
|
|
|
|
|
2017-04-06 17:49:34 +08:00
|
|
|
namespace {

// Tblgen-erated bitset type describing which subtarget-feature predicates
// are available to the imported selection rules.
#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

/// GlobalISel instruction selector for the X86 target. Tries the
/// tblgen-erated rules first and falls back to hand-written C++ selectors.
class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  /// Select the generic instruction \p I, mutating or replacing it in place.
  bool select(MachineInstr &I) const override;

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I) const;

  // TODO: remove after supported by Tablegen-erated instruction selection.

  /// Pick the X86 load/store opcode for type \p Ty on register bank \p RB;
  /// returns \p Opc unchanged when no suitable target opcode is known.
  unsigned getLoadStoreOp(LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          uint64_t Alignment) const;

  // Each selectXxx helper below handles one generic opcode family and
  // returns true iff it selected the instruction.
  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTrunc(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;

  // emit insert subreg instruction and insert it before MachineInstr &I
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;

  /// Map a type/register-bank pair (or a register's assigned bank) onto the
  /// target register class used for it.
  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

// Tblgen-erated member declarations: the available-feature bitset and any
// temporaries the imported rules require.
#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace
|
|
|
|
|
2017-03-15 05:32:08 +08:00
|
|
|
#define GET_GLOBALISEL_IMPL
|
2017-02-22 20:25:09 +08:00
|
|
|
#include "X86GenGlobalISel.inc"
|
2017-03-15 05:32:08 +08:00
|
|
|
#undef GET_GLOBALISEL_IMPL
|
2017-02-22 20:25:09 +08:00
|
|
|
|
[globalisel][tablegen] Import SelectionDAG's rule predicates and support the equivalent in GIRule.
Summary:
The SelectionDAG importer now imports rules with Predicate's attached via
Requires, PredicateControl, etc. These predicates are implemented as
bitset's to allow multiple predicates to be tested together. However,
unlike the MC layer subtarget features, each target only pays for it's own
predicates (e.g. AArch64 doesn't have 192 feature bits just because X86
needs a lot).
Both AArch64 and X86 derive at least one predicate from the MachineFunction
or Function so they must re-initialize AvailableFeatures before each
function. They also declare locals in <Target>InstructionSelector so that
computeAvailableFeatures() can use the code from SelectionDAG without
modification.
Reviewers: rovka, qcolombet, aditya_nandakumar, t.p.northover, ab
Reviewed By: rovka
Subscribers: aemerson, rengolin, dberris, kristof.beyls, llvm-commits, igorb
Differential Revision: https://reviews.llvm.org/D31418
llvm-svn: 300993
2017-04-21 23:59:56 +08:00
|
|
|
X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
// Tblgen-erated member initializers for the predicate bitset and the
// temporaries declared in the class body above.
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
|
2017-02-22 20:25:09 +08:00
|
|
|
|
|
|
|
// FIXME: This should be target-independent, inferred from the types declared
|
|
|
|
// for each class in the bank.
|
2017-06-20 17:15:10 +08:00
|
|
|
const TargetRegisterClass *
|
|
|
|
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
|
2017-02-22 20:25:09 +08:00
|
|
|
if (RB.getID() == X86::GPRRegBankID) {
|
2017-04-19 19:34:59 +08:00
|
|
|
if (Ty.getSizeInBits() <= 8)
|
|
|
|
return &X86::GR8RegClass;
|
|
|
|
if (Ty.getSizeInBits() == 16)
|
|
|
|
return &X86::GR16RegClass;
|
2017-03-03 16:06:46 +08:00
|
|
|
if (Ty.getSizeInBits() == 32)
|
2017-02-22 20:25:09 +08:00
|
|
|
return &X86::GR32RegClass;
|
|
|
|
if (Ty.getSizeInBits() == 64)
|
|
|
|
return &X86::GR64RegClass;
|
|
|
|
}
|
2017-03-03 16:06:46 +08:00
|
|
|
if (RB.getID() == X86::VECRRegBankID) {
|
|
|
|
if (Ty.getSizeInBits() == 32)
|
2017-06-20 17:15:10 +08:00
|
|
|
return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
|
2017-03-03 16:06:46 +08:00
|
|
|
if (Ty.getSizeInBits() == 64)
|
2017-06-20 17:15:10 +08:00
|
|
|
return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
|
2017-03-03 16:06:46 +08:00
|
|
|
if (Ty.getSizeInBits() == 128)
|
2017-06-20 17:15:10 +08:00
|
|
|
return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
|
2017-03-03 16:06:46 +08:00
|
|
|
if (Ty.getSizeInBits() == 256)
|
2017-06-20 17:15:10 +08:00
|
|
|
return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
|
2017-03-03 16:06:46 +08:00
|
|
|
if (Ty.getSizeInBits() == 512)
|
|
|
|
return &X86::VR512RegClass;
|
|
|
|
}
|
2017-02-22 20:25:09 +08:00
|
|
|
|
|
|
|
llvm_unreachable("Unknown RegBank!");
|
|
|
|
}
|
|
|
|
|
2017-06-20 17:15:10 +08:00
|
|
|
/// Convenience overload: look up the bank assigned to \p Reg and defer to
/// the bank-based getRegClass above.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  return getRegClass(Ty, *RBI.getRegBank(Reg, MRI, TRI));
}
|
|
|
|
|
2017-02-22 20:25:09 +08:00
|
|
|
// Set X86 Opcode and constrain DestReg.

/// Turn a generic COPY into a target COPY: choose a register class for the
/// destination from its bank and constrain it. Physical destinations are
/// accepted as-is.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {

  unsigned DstReg = I.getOperand(0).getReg();
  // Copies into a physical register need no class assignment.
  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");
    return true;
  }

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a mean to setup initial types, the number of
          // bits may not exactly match.
          (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *RC = nullptr;

  switch (RegBank.getID()) {
  case X86::GPRRegBankID:
    assert((DstSize <= 64) && "GPRs cannot get more than 64-bit width values.");
    RC = getRegClass(MRI.getType(DstReg), RegBank);

    // Change the physical register
    // When copying from a wider physical register, narrow the source to the
    // matching sub-register of the destination class.
    if (SrcSize > DstSize && TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
      if (RC == &X86::GR32RegClass)
        I.getOperand(1).setSubReg(X86::sub_32bit);
      else if (RC == &X86::GR16RegClass)
        I.getOperand(1).setSubReg(X86::sub_16bit);
      else if (RC == &X86::GR8RegClass)
        I.getOperand(1).setSubReg(X86::sub_8bit);

      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
    break;
  case X86::VECRRegBankID:
    RC = getRegClass(MRI.getType(DstReg), RegBank);
    break;
  default:
    llvm_unreachable("Unknown RegBank!");
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its use or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  // Only re-constrain when the existing class (if any) is not already at
  // least as restrictive as RC.
  if (!OldRC || !RC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
|
|
|
|
|
|
|
|
/// Entry point: select the (possibly generic) instruction \p I. Non-generic
/// instructions get special-case handling; generic ones go through the
/// tblgen-erated rules first, then the hand-written C++ selectors.
bool X86InstructionSelector::select(MachineInstr &I) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  const unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.
    if (I.isCopy())
      return selectCopy(I, MRI);

    // TODO: handle more cases - LOAD_STACK_GUARD, PHI
    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  // Give the imported (tblgen-erated) rules the first shot.
  if (selectImpl(I))
    return true;

  DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  // Fall back to the hand-written selectors; stop at the first that matches.
  return selectLoadStoreOp(I, MRI, MF) || selectFrameIndexOrGep(I, MRI, MF) ||
         selectConstant(I, MRI, MF) || selectTrunc(I, MRI, MF) ||
         selectZext(I, MRI, MF) || selectCmp(I, MRI, MF) ||
         selectUadde(I, MRI, MF) || selectInsert(I, MRI, MF);
}
|
2017-03-03 16:06:46 +08:00
|
|
|
|
2017-03-23 23:25:57 +08:00
|
|
|
/// Map a generic G_LOAD/G_STORE of type \p Ty on bank \p RB to a concrete
/// X86 move opcode, taking subtarget features (AVX/AVX-512/VLX) and the
/// access alignment into account. Returns \p Opc unchanged when no mapping
/// is known, which callers treat as "not selectable here".
unsigned X86InstructionSelector::getLoadStoreOp(LLT &Ty, const RegisterBank &RB,
                                                unsigned Opc,
                                                uint64_t Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      // Scalar FP on the vector bank: pick MOVSS variant by ISA level.
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm
                                 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
                    : (HasAVX512 ? X86::VMOVSSZmr
                                 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm
                                 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
                    : (HasAVX512 ? X86::VMOVSDZmr
                                 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    // 128-bit vectors: aligned (MOVAPS) vs unaligned (MOVUPS) forms, with
    // VLX/AVX-512/AVX/SSE variants in decreasing feature order.
    if (Alignment >= 16)
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= 32)
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= 64)
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  // No mapping found: hand back the generic opcode unchanged.
  return Opc;
}
|
|
|
|
|
2017-06-19 21:12:57 +08:00
|
|
|
// Fill in an address from the given instruction.
|
|
|
|
void X86SelectAddress(const MachineInstr &I, const MachineRegisterInfo &MRI,
|
|
|
|
X86AddressMode &AM) {
|
|
|
|
|
|
|
|
assert(I.getOperand(0).isReg() && "unsupported opperand.");
|
|
|
|
assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
|
|
|
|
"unsupported type.");
|
|
|
|
|
|
|
|
if (I.getOpcode() == TargetOpcode::G_GEP) {
|
|
|
|
if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
|
|
|
|
int64_t Imm = *COff;
|
|
|
|
if (isInt<32>(Imm)) { // Check for displacement overflow.
|
|
|
|
AM.Disp = static_cast<int32_t>(Imm);
|
|
|
|
AM.Base.Reg = I.getOperand(1).getReg();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
|
|
|
|
AM.Base.FrameIndex = I.getOperand(1).getIndex();
|
|
|
|
AM.BaseType = X86AddressMode::FrameIndexBase;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Default behavior.
|
|
|
|
AM.Base.Reg = I.getOperand(0).getReg();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-03-23 23:25:57 +08:00
|
|
|
/// Select G_LOAD / G_STORE by rewriting the instruction in place into the
/// target memory opcode and expanding the single pointer operand into a
/// full X86 address (base, scale, index, displacement, segment).
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {

  unsigned Opc = I.getOpcode();

  if (Opc != TargetOpcode::G_STORE && Opc != TargetOpcode::G_LOAD)
    return false;

  // Operand 0 is the loaded value (G_LOAD) or the stored value (G_STORE).
  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
    DEBUG(dbgs() << "Atomic load/store not supported yet\n");
    return false;
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  // getLoadStoreOp returns the generic opcode unchanged when it has no
  // mapping; bail out in that case.
  if (NewOpc == Opc)
    return false;

  // Compute the address from the instruction that defines the pointer
  // operand (operand 1).
  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    // Drop the pointer operand, then append the expanded address operands.
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    // Remove both operands (higher index first), then re-add as Addr, VAL.
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
|
|
|
|
|
2017-05-08 17:40:43 +08:00
|
|
|
/// Select G_FRAME_INDEX and G_GEP by rewriting them into an LEA whose
/// address operands encode the frame index or the base+index computation.
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  if (Opc != TargetOpcode::G_FRAME_INDEX && Opc != TargetOpcode::G_GEP)
    return false;

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc;
  if (Ty == LLT::pointer(0, 64))
    NewOpc = X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    // x32 (64-bit with ILP32) uses the 64-bit LEA with a 32-bit result.
    NewOpc = STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't select G_FRAME_INDEX/G_GEP, unsupported type.");

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    // Frame index is already operand 1; append scale/index/disp/segment.
    addOffset(MIB, 0);
  } else {
    // G_GEP: reuse the offset operand as the index register, then turn the
    // original slot into the scale. Order matters: addOperand copies InxOp
    // before ChangeToImmediate overwrites it.
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp); // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
|
2017-04-06 17:49:34 +08:00
|
|
|
|
2017-04-12 20:54:54 +08:00
|
|
|
/// Select G_CONSTANT by rewriting it in place into the MOVri of the
/// matching width; 64-bit values that fit in a signed 32-bit immediate use
/// the shorter MOV64ri32 encoding.
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  if (I.getOpcode() != TargetOpcode::G_CONSTANT)
    return false;

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  assert(Ty.isScalar() && "invalid element type.");

  // Extract the constant, normalizing a CImm operand into a plain Imm.
  uint64_t Val = 0;
  MachineOperand &SrcOp = I.getOperand(1);
  if (SrcOp.isCImm()) {
    Val = SrcOp.getCImm()->getZExtValue();
    SrcOp.ChangeToImmediate(Val);
  } else if (SrcOp.isImm()) {
    Val = SrcOp.getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    NewOpc = isInt<32>(Val) ? X86::MOV64ri32 : X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
|
|
|
|
|
2017-04-19 19:34:59 +08:00
|
|
|
/// Select G_TRUNC on the GPR bank by turning it into a COPY that reads the
/// appropriate sub-register of the (suitably constrained) source.
bool X86InstructionSelector::selectTrunc(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  if (I.getOpcode() != TargetOpcode::G_TRUNC)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    DEBUG(dbgs() << "G_TRUNC input/output on different banks\n");
    return false;
  }

  // Only GPR truncation is handled here.
  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  if (!DstRC)
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
  if (!SrcRC)
    return false;

  // Pick the sub-register index matching the destination width.
  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  // Narrow the source class to one that actually has SubIdx sub-registers
  // (e.g. 8-bit sub-registers are not available on every GR class).
  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
|
|
|
|
|
2017-05-10 14:52:58 +08:00
|
|
|
/// Select G_ZEXT. Currently only handles zero-extending an s1 into s32/s64:
/// widen with SUBREG_TO_REG, then mask to the low bit with an AND.
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  if (I.getOpcode() != TargetOpcode::G_ZEXT)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  if (SrcTy == LLT::scalar(1)) {

    // AND with an 8-bit immediate of the destination width.
    unsigned AndOpc;
    if (DstTy == LLT::scalar(32))
      AndOpc = X86::AND32ri8;
    else if (DstTy == LLT::scalar(64))
      AndOpc = X86::AND64ri8;
    else
      return false;

    // Intermediate register holding the widened (but unmasked) value.
    unsigned DefReg =
        MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));

    // DefReg = SUBREG_TO_REG 0, SrcReg, sub_8bit — place the s1 value into
    // the low 8 bits of a full-width register.
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);

    // DstReg = AND DefReg, 1 — keep only the boolean bit.
    MachineInstr &AndInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
             .addReg(DefReg)
             .addImm(1);

    constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

    // The original G_ZEXT is fully replaced by the two instructions above.
    I.eraseFromParent();
    return true;
  }

  return false;
}
|
|
|
|
|
2017-05-11 15:17:40 +08:00
|
|
|
/// Select G_ICMP: emit a register-register CMP followed by a SETcc into the
/// destination register.
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  if (I.getOpcode() != TargetOpcode::G_ICMP)
    return false;

  // Map the generic predicate onto an X86 condition code; the helper also
  // reports whether the operands must be swapped for that encoding.
  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate()));
  const unsigned SetOpc = X86::getSETFromCond(CC);

  unsigned LHS = I.getOperand(2).getReg();
  unsigned RHS = I.getOperand(3).getReg();
  if (SwapArgs)
    std::swap(LHS, RHS);

  // Choose the CMPrr form matching the operand width.
  const LLT OpTy = MRI.getType(LHS);
  unsigned CmpOpc;
  switch (OpTy.getSizeInBits()) {
  case 8:
    CmpOpc = X86::CMP8rr;
    break;
  case 16:
    CmpOpc = X86::CMP16rr;
    break;
  case 32:
    CmpOpc = X86::CMP32rr;
    break;
  case 64:
    CmpOpc = X86::CMP64rr;
    break;
  default:
    return false;
  }

  // Note: renamed from `CmpInst` to avoid shadowing the llvm::CmpInst class
  // referenced in the cast above.
  MachineInstr &CmpI =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetI = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                TII.get(SetOpc), I.getOperand(0).getReg());

  constrainSelectedInstRegOperands(CmpI, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetI, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}
|
|
|
|
|
2017-05-17 20:48:08 +08:00
|
|
|
/// Select G_UADDE (add with carry-in, producing a carry-out).
///
/// Only the 32-bit scalar case is handled. If the carry-in (looking through
/// G_TRUNCs) comes from a previous G_UADDE, the value is copied into EFLAGS
/// and ADC is used; if the carry-in is the constant 0, a plain ADD is used.
/// The carry-out is copied back out of EFLAGS.
bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  if (I.getOpcode() != TargetOpcode::G_UADDE)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned CarryOutReg = I.getOperand(1).getReg();
  const unsigned Op0Reg = I.getOperand(2).getReg();
  const unsigned Op1Reg = I.getOperand(3).getReg();
  unsigned CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Walk through any G_TRUNCs to the instruction that defines the carry-in.
  MachineInstr *CarryDef = MRI.getVRegDef(CarryInReg);
  while (CarryDef->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = CarryDef->getOperand(1).getReg();
    CarryDef = MRI.getVRegDef(CarryInReg);
  }

  unsigned AddOpc;
  if (CarryDef->getOpcode() == TargetOpcode::G_UADDE) {
    // Carry was set by a previous wide add: put it back in EFLAGS and use
    // add-with-carry.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    AddOpc = X86::ADC32rr;
  } else if (auto CstVal = getConstantVRegVal(CarryInReg, MRI)) {
    // A constant carry-in is only supported when it is zero.
    if (*CstVal != 0)
      return false;

    AddOpc = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AddOpc), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  // Capture the carry-out from EFLAGS.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}
|
|
|
|
|
2017-06-22 17:43:35 +08:00
|
|
|
bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
|
|
|
|
MachineInstr &I,
|
|
|
|
MachineRegisterInfo &MRI,
|
|
|
|
MachineFunction &MF) const {
|
|
|
|
|
|
|
|
const LLT DstTy = MRI.getType(DstReg);
|
|
|
|
const LLT SrcTy = MRI.getType(SrcReg);
|
|
|
|
unsigned SubIdx = X86::NoSubRegister;
|
|
|
|
|
|
|
|
// TODO: support scalar types
|
|
|
|
if (!DstTy.isVector() || !SrcTy.isVector())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
|
|
|
|
"Incorrect Src/Dst register size");
|
|
|
|
|
|
|
|
if (SrcTy.getSizeInBits() == 128)
|
|
|
|
SubIdx = X86::sub_xmm;
|
|
|
|
else if (SrcTy.getSizeInBits() == 256)
|
|
|
|
SubIdx = X86::sub_ymm;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
|
|
|
|
const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
|
|
|
|
const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
|
|
|
|
|
|
|
|
if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
|
|
|
|
!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
|
|
|
|
DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
|
|
|
|
.addReg(DstReg, RegState::DefineNoRead, SubIdx)
|
|
|
|
.addReg(SrcReg);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Select G_INSERT of a subvector.
///
/// An insert at offset 0 into an IMPLICIT_DEF becomes a plain subregister
/// copy; otherwise the instruction is morphed into the appropriate
/// VINSERTF variant for the target's feature set, rewriting the bit offset
/// as a subvector index.
bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  if (I.getOpcode() != TargetOpcode::G_INSERT)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // Only vector destinations are handled for now.
  if (!DstTy.isVector())
    return false;

  // The offset must address a whole subvector.
  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false;

  // Inserting at offset 0 into an undef value is just a subregister copy.
  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  const bool HasAVX = STI.hasAVX();
  const bool HasAVX512 = STI.hasAVX512();
  const bool HasVLX = STI.hasVLX();

  // Choose the VINSERT opcode from the destination/insert widths and the
  // available subtarget features.
  unsigned Opc;
  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      Opc = X86::VINSERTF32x4Z256rr;
    else if (HasAVX)
      Opc = X86::VINSERTF128rr;
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      Opc = X86::VINSERTF32x4Zrr;
    else if (InsertRegTy.getSizeInBits() == 256)
      Opc = X86::VINSERTF64x4Zrr;
    else
      return false;
  } else
    return false;

  I.setDesc(TII.get(Opc));

  // Convert the bit offset to an X86 VINSERT subvector index.
  I.getOperand(3).setImm(Index / InsertRegTy.getSizeInBits());

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
|
|
|
|
|
2017-04-06 17:49:34 +08:00
|
|
|
/// Factory entry point: create the X86 GlobalISel instruction selector.
/// Ownership of the returned object passes to the caller.
InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}
|