//===- X86InstructionSelector.cpp -----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// X86.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <tuple>

#define DEBUG_TYPE "X86-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

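// X86GenGlobalISel.inc is included several times in this file; each
// GET_GLOBALISEL_* guard selects a different tablegen-erated fragment: the
// feature-predicate bitset, member declarations, member initializers, and
// the selectImpl() match table.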
class X86InstructionSelector : public InstructionSelector {
public:
  X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
                         const X86RegisterBankInfo &RBI);

  bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  /// tblgen-erated 'select' implementation, used as the initial selector for
  /// the patterns that don't require complex C++.
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  // TODO: remove these once they are supported by tablegen-erated
  // instruction selection.
  unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
                          uint64_t Alignment) const;

  bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectFrameIndexOrGep(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectGlobalValue(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF) const;
  bool selectConstant(MachineInstr &I, MachineRegisterInfo &MRI,
                      MachineFunction &MF) const;
  bool selectTruncOrPtrToInt(MachineInstr &I, MachineRegisterInfo &MRI,
                             MachineFunction &MF) const;
  bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;
  bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
                 MachineFunction &MF) const;
  bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                           MachineFunction &MF,
                           CodeGenCoverage &CoverageInfo) const;
  bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
                         MachineFunction &MF,
                         CodeGenCoverage &CoverageInfo) const;
  bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
                    MachineFunction &MF) const;
  bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
                        MachineFunction &MF) const;
  bool selectTurnIntoCOPY(MachineInstr &I, MachineRegisterInfo &MRI,
                          const unsigned DstReg,
                          const TargetRegisterClass *DstRC,
                          const unsigned SrcReg,
                          const TargetRegisterClass *SrcRC) const;
  bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
                     MachineFunction &MF) const;
  bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
  bool selectShift(MachineInstr &I, MachineRegisterInfo &MRI,
                   MachineFunction &MF) const;
  bool selectSDiv(MachineInstr &I, MachineRegisterInfo &MRI,
                  MachineFunction &MF) const;

  // Emit an insert-subreg instruction and insert it before MachineInstr &I.
  bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                        MachineRegisterInfo &MRI, MachineFunction &MF) const;
  // Emit an extract-subreg instruction and insert it before MachineInstr &I.
  bool emitExtractSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
                         MachineRegisterInfo &MRI, MachineFunction &MF) const;

  const TargetRegisterClass *getRegClass(LLT Ty, const RegisterBank &RB) const;
  const TargetRegisterClass *getRegClass(LLT Ty, unsigned Reg,
                                         MachineRegisterInfo &MRI) const;

  const X86TargetMachine &TM;
  const X86Subtarget &STI;
  const X86InstrInfo &TII;
  const X86RegisterInfo &TRI;
  const X86RegisterBankInfo &RBI;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // end anonymous namespace

#define GET_GLOBALISEL_IMPL
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

X86InstructionSelector::X86InstructionSelector(const X86TargetMachine &TM,
                                               const X86Subtarget &STI,
                                               const X86RegisterBankInfo &RBI)
    : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "X86GenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

// FIXME: This should be target-independent, inferred from the types declared
// for each class in the bank.
const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == X86::GPRRegBankID) {
    if (Ty.getSizeInBits() <= 8)
      return &X86::GR8RegClass;
    if (Ty.getSizeInBits() == 16)
      return &X86::GR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &X86::GR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &X86::GR64RegClass;
  }
  if (RB.getID() == X86::VECRRegBankID) {
    if (Ty.getSizeInBits() == 32)
      return STI.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return STI.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
    if (Ty.getSizeInBits() == 128)
      return STI.hasAVX512() ? &X86::VR128XRegClass : &X86::VR128RegClass;
    if (Ty.getSizeInBits() == 256)
      return STI.hasAVX512() ? &X86::VR256XRegClass : &X86::VR256RegClass;
    if (Ty.getSizeInBits() == 512)
      return &X86::VR512RegClass;
  }

  llvm_unreachable("Unknown RegBank!");
}

const TargetRegisterClass *
X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
                                    MachineRegisterInfo &MRI) const {
  const RegisterBank &RegBank = *RBI.getRegBank(Reg, MRI, TRI);
  return getRegClass(Ty, RegBank);
}
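
// Maps a GPR register class to the sub-register index that addresses it
// inside a wider register, e.g. GR32 -> sub_32bit (EAX within RAX); GR64
// needs no index, so X86::NoSubRegister is returned for it.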
static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
  unsigned SubIdx = X86::NoSubRegister;
  if (RC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (RC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (RC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  }

  return SubIdx;
}

static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
  assert(TargetRegisterInfo::isPhysicalRegister(Reg));
  if (X86::GR64RegClass.contains(Reg))
    return &X86::GR64RegClass;
  if (X86::GR32RegClass.contains(Reg))
    return &X86::GR32RegClass;
  if (X86::GR16RegClass.contains(Reg))
    return &X86::GR16RegClass;
  if (X86::GR8RegClass.contains(Reg))
    return &X86::GR8RegClass;

  llvm_unreachable("Unknown RegClass for PhysReg!");
}
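
// Cross-size GPR copies against physical registers (inserted by ABI
// lowering) are patched up in selectCopy. Illustrative MIR sketch, with
// hypothetical register names, for the widening case handled below:
//   %ext:gr32 = SUBREG_TO_REG 0, %src:gr8, sub_8bit
//   $eax = COPY %ext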
// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
                                        MachineRegisterInfo &MRI) const {
  unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
  const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
    assert(I.isCopy() && "Generic operators do not allow physical registers");

    if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
        DstRegBank.getID() == X86::GPRRegBankID) {

      const TargetRegisterClass *SrcRC =
          getRegClass(MRI.getType(SrcReg), SrcRegBank);
      const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);

      if (SrcRC != DstRC) {
        // This case can be generated by ABI lowering; perform an anyext.
        unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
        BuildMI(*I.getParent(), I, I.getDebugLoc(),
                TII.get(TargetOpcode::SUBREG_TO_REG))
            .addDef(ExtSrc)
            .addImm(0)
            .addReg(SrcReg)
            .addImm(getSubRegIndex(SrcRC));

        I.getOperand(1).setReg(ExtSrc);
      }
    }

    return true;
  }

  assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
         "No phys reg on generic operators");
  assert((DstSize == SrcSize ||
          // Copies are a means to set up initial types; the number of
          // bits may not exactly match.
          (TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
           DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
         "Copy with different width?!");

  const TargetRegisterClass *DstRC =
      getRegClass(MRI.getType(DstReg), DstRegBank);

  if (SrcRegBank.getID() == X86::GPRRegBankID &&
      DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
      TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
    // Change the physical register to perform a truncate.

    const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);

    if (DstRC != SrcRC) {
      I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
      I.getOperand(1).substPhysReg(SrcReg, TRI);
    }
  }

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
  if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
    if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}
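
// select() first defers to the tablegen-erated selectImpl(); only the
// opcodes that the imported patterns cannot handle yet fall through to the
// hand-written switch below.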
bool X86InstructionSelector::select(MachineInstr &I,
                                    CodeGenCoverage &CoverageInfo) const {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  unsigned Opcode = I.getOpcode();
  if (!isPreISelGenericOpcode(Opcode)) {
    // Certain non-generic instructions also need some special handling.

    if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
      return false;

    if (I.isCopy())
      return selectCopy(I, MRI);

    return true;
  }

  assert(I.getNumOperands() == I.getNumExplicitOperands() &&
         "Generic instruction has unexpected implicit operands\n");

  if (selectImpl(I, CoverageInfo))
    return true;

  DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));

  // TODO: This should be implemented by tblgen.
  switch (I.getOpcode()) {
  default:
    return false;
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_LOAD:
    return selectLoadStoreOp(I, MRI, MF);
  case TargetOpcode::G_GEP:
  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndexOrGep(I, MRI, MF);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(I, MRI, MF);
  case TargetOpcode::G_CONSTANT:
    return selectConstant(I, MRI, MF);
  case TargetOpcode::G_FCONSTANT:
    return materializeFP(I, MRI, MF);
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_TRUNC:
    return selectTruncOrPtrToInt(I, MRI, MF);
  case TargetOpcode::G_INTTOPTR:
    return selectCopy(I, MRI);
  case TargetOpcode::G_ZEXT:
    return selectZext(I, MRI, MF);
  case TargetOpcode::G_ANYEXT:
    return selectAnyext(I, MRI, MF);
  case TargetOpcode::G_ICMP:
    return selectCmp(I, MRI, MF);
  case TargetOpcode::G_UADDE:
    return selectUadde(I, MRI, MF);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(I, MRI, MF, CoverageInfo);
  case TargetOpcode::G_EXTRACT:
    return selectExtract(I, MRI, MF);
  case TargetOpcode::G_INSERT:
    return selectInsert(I, MRI, MF);
  case TargetOpcode::G_BRCOND:
    return selectCondBranch(I, MRI, MF);
  case TargetOpcode::G_IMPLICIT_DEF:
  case TargetOpcode::G_PHI:
    return selectImplicitDefOrPHI(I, MRI);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return selectShift(I, MRI, MF);
  case TargetOpcode::G_SDIV:
    return selectSDiv(I, MRI, MF);
  }

  return false;
}
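
// The load/store opcode is picked from three properties: the value type,
// the register bank (GPR vs. VECR), and, for vectors, the memory-operand
// alignment, which chooses between aligned (MOVAPS-style) and unaligned
// (MOVUPS-style) moves.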
unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
                                                const RegisterBank &RB,
                                                unsigned Opc,
                                                uint64_t Alignment) const {
  bool Isload = (Opc == TargetOpcode::G_LOAD);
  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (Ty == LLT::scalar(8)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV8rm : X86::MOV8mr;
  } else if (Ty == LLT::scalar(16)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV16rm : X86::MOV16mr;
  } else if (Ty == LLT::scalar(32) || Ty == LLT::pointer(0, 32)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV32rm : X86::MOV32mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSSZrm
                                 : HasAVX ? X86::VMOVSSrm : X86::MOVSSrm)
                    : (HasAVX512 ? X86::VMOVSSZmr
                                 : HasAVX ? X86::VMOVSSmr : X86::MOVSSmr);
  } else if (Ty == LLT::scalar(64) || Ty == LLT::pointer(0, 64)) {
    if (X86::GPRRegBankID == RB.getID())
      return Isload ? X86::MOV64rm : X86::MOV64mr;
    if (X86::VECRRegBankID == RB.getID())
      return Isload ? (HasAVX512 ? X86::VMOVSDZrm
                                 : HasAVX ? X86::VMOVSDrm : X86::MOVSDrm)
                    : (HasAVX512 ? X86::VMOVSDZmr
                                 : HasAVX ? X86::VMOVSDmr : X86::MOVSDmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 128) {
    if (Alignment >= 16)
      return Isload ? (HasVLX ? X86::VMOVAPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVAPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVAPSrm : X86::MOVAPSrm)
                    : (HasVLX ? X86::VMOVAPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVAPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVAPSmr : X86::MOVAPSmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ128rm
                              : HasAVX512
                                    ? X86::VMOVUPSZ128rm_NOVLX
                                    : HasAVX ? X86::VMOVUPSrm : X86::MOVUPSrm)
                    : (HasVLX ? X86::VMOVUPSZ128mr
                              : HasAVX512
                                    ? X86::VMOVUPSZ128mr_NOVLX
                                    : HasAVX ? X86::VMOVUPSmr : X86::MOVUPSmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 256) {
    if (Alignment >= 32)
      return Isload ? (HasVLX ? X86::VMOVAPSZ256rm
                              : HasAVX512 ? X86::VMOVAPSZ256rm_NOVLX
                                          : X86::VMOVAPSYrm)
                    : (HasVLX ? X86::VMOVAPSZ256mr
                              : HasAVX512 ? X86::VMOVAPSZ256mr_NOVLX
                                          : X86::VMOVAPSYmr);
    else
      return Isload ? (HasVLX ? X86::VMOVUPSZ256rm
                              : HasAVX512 ? X86::VMOVUPSZ256rm_NOVLX
                                          : X86::VMOVUPSYrm)
                    : (HasVLX ? X86::VMOVUPSZ256mr
                              : HasAVX512 ? X86::VMOVUPSZ256mr_NOVLX
                                          : X86::VMOVUPSYmr);
  } else if (Ty.isVector() && Ty.getSizeInBits() == 512) {
    if (Alignment >= 64)
      return Isload ? X86::VMOVAPSZrm : X86::VMOVAPSZmr;
    else
      return Isload ? X86::VMOVUPSZrm : X86::VMOVUPSZmr;
  }
  return Opc;
}
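
// X86SelectAddress below folds the instruction defining a pointer into an
// X86AddressMode. For example (illustrative MIR, register numbers made up):
//   %1:gpr(p0) = G_GEP %0(p0), %2(s64)   ; %2 = G_CONSTANT i64 16
// yields AM.Base.Reg = %0 and AM.Disp = 16, provided the displacement fits
// a signed 32-bit immediate.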
// Fill in an address from the given instruction.
static void X86SelectAddress(const MachineInstr &I,
                             const MachineRegisterInfo &MRI,
                             X86AddressMode &AM) {
  assert(I.getOperand(0).isReg() && "unsupported operand.");
  assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
         "unsupported type.");

  if (I.getOpcode() == TargetOpcode::G_GEP) {
    if (auto COff = getConstantVRegVal(I.getOperand(2).getReg(), MRI)) {
      int64_t Imm = *COff;
      if (isInt<32>(Imm)) { // Check for displacement overflow.
        AM.Disp = static_cast<int32_t>(Imm);
        AM.Base.Reg = I.getOperand(1).getReg();
        return;
      }
    }
  } else if (I.getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    AM.Base.FrameIndex = I.getOperand(1).getIndex();
    AM.BaseType = X86AddressMode::FrameIndexBase;
    return;
  }

  // Default behavior.
  AM.Base.Reg = I.getOperand(0).getReg();
}
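
// For example (illustrative): "G_STORE %val(s32), %ptr(p0)" is rewritten to
// "MOV32mr <address operands>, %val"; the stored value moves from operand 0
// to the last operand, after the address.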
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  const RegisterBank &RB = *RBI.getRegBank(DefReg, MRI, TRI);

  auto &MemOp = **I.memoperands_begin();
  if (MemOp.getOrdering() != AtomicOrdering::NotAtomic) {
    DEBUG(dbgs() << "Atomic load/store not supported yet\n");
    return false;
  }

  unsigned NewOpc = getLoadStoreOp(Ty, RB, Opc, MemOp.getAlignment());
  if (NewOpc == Opc)
    return false;

  X86AddressMode AM;
  X86SelectAddress(*MRI.getVRegDef(I.getOperand(1).getReg()), MRI, AM);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);
  if (Opc == TargetOpcode::G_LOAD) {
    I.RemoveOperand(1);
    addFullAddress(MIB, AM);
  } else {
    // G_STORE (VAL, Addr), X86Store instruction (Addr, VAL)
    I.RemoveOperand(1);
    I.RemoveOperand(0);
    addFullAddress(MIB, AM).addUse(DefReg);
  }
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
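
// Note: on 64-bit ILP32 (x32) targets, 32-bit pointers still live in 64-bit
// GPRs, so getLeaOP below returns LEA64_32r, which computes a 64-bit address
// and writes the low 32 bits of the result.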
static unsigned getLeaOP(LLT Ty, const X86Subtarget &STI) {
  if (Ty == LLT::pointer(0, 64))
    return X86::LEA64r;
  else if (Ty == LLT::pointer(0, 32))
    return STI.isTarget64BitILP32() ? X86::LEA64_32r : X86::LEA32r;
  else
    llvm_unreachable("Can't get LEA opcode. Unsupported type.");
}
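
// A full x86 address has five machine operands: Base, Scale, Index, Disp,
// Segment. Below, G_FRAME_INDEX becomes roughly "LEA dst, [FI + 0]" and
// G_GEP becomes roughly "LEA dst, [Base + 1*Index + 0]".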
bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  unsigned Opc = I.getOpcode();

  assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  // Use LEA to calculate frame index and GEP
  unsigned NewOpc = getLeaOP(Ty, STI);
  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  if (Opc == TargetOpcode::G_FRAME_INDEX) {
    addOffset(MIB, 0);
  } else {
    MachineOperand &InxOp = I.getOperand(2);
    I.addOperand(InxOp);        // set IndexReg
    InxOp.ChangeToImmediate(1); // set Scale
    MIB.addImm(0).addReg(0);
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
         "unexpected instruction");

  auto GV = I.getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    return false; // TODO: we don't support TLS yet.
  }

  // Can't handle alternate code models yet.
  if (TM.getCodeModel() != CodeModel::Small)
    return false;

  X86AddressMode AM;
  AM.GV = GV;
  AM.GVOpFlags = STI.classifyGlobalReference(GV);

  // TODO: The ABI requires an extra load. Not supported yet.
  if (isGlobalStubReference(AM.GVOpFlags))
    return false;

  // TODO: This reference is relative to the PIC base. Not supported yet.
  if (isGlobalRelativeToPICBase(AM.GVOpFlags))
    return false;

  if (STI.isPICStyleRIPRel()) {
    // Use rip-relative addressing.
    assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
    AM.Base.Reg = X86::RIP;
  }

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);
  unsigned NewOpc = getLeaOP(Ty, STI);

  I.setDesc(TII.get(NewOpc));
  MachineInstrBuilder MIB(MF, I);

  I.RemoveOperand(1);
  addFullAddress(MIB, AM);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
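
// Note for the 64-bit case in selectConstant below: MOV64ri32 carries a
// 32-bit immediate that the hardware sign-extends to 64 bits, so it is only
// safe when isInt<32>(Val); otherwise the full 10-byte MOV64ri encoding is
// needed.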
bool X86InstructionSelector::selectConstant(MachineInstr &I,
                                            MachineRegisterInfo &MRI,
                                            MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
         "unexpected instruction");

  const unsigned DefReg = I.getOperand(0).getReg();
  LLT Ty = MRI.getType(DefReg);

  if (RBI.getRegBank(DefReg, MRI, TRI)->getID() != X86::GPRRegBankID)
    return false;

  uint64_t Val = 0;
  if (I.getOperand(1).isCImm()) {
    Val = I.getOperand(1).getCImm()->getZExtValue();
    I.getOperand(1).ChangeToImmediate(Val);
  } else if (I.getOperand(1).isImm()) {
    Val = I.getOperand(1).getImm();
  } else
    llvm_unreachable("Unsupported operand type.");

  unsigned NewOpc;
  switch (Ty.getSizeInBits()) {
  case 8:
    NewOpc = X86::MOV8ri;
    break;
  case 16:
    NewOpc = X86::MOV16ri;
    break;
  case 32:
    NewOpc = X86::MOV32ri;
    break;
  case 64:
    // TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
    if (isInt<32>(Val))
      NewOpc = X86::MOV64ri32;
    else
      NewOpc = X86::MOV64ri;
    break;
  default:
    llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
  }

  I.setDesc(TII.get(NewOpc));
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
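
// On x86, scalar floats (FR32/FR64) live in the low lane of the XMM
// registers that also back the 128-bit vector classes, so a vector-to-float
// truncation needs no instruction at all; the helpers below rewrite such
// cases into a plain COPY.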
// Helper function for selectTruncOrPtrToInt and selectAnyext.
// Returns true if DstRC lives on a floating register class and
// SrcRC lives on a 128-bit vector class.
static bool canTurnIntoCOPY(const TargetRegisterClass *DstRC,
                            const TargetRegisterClass *SrcRC) {
  return (DstRC == &X86::FR32RegClass || DstRC == &X86::FR32XRegClass ||
          DstRC == &X86::FR64RegClass || DstRC == &X86::FR64XRegClass) &&
         (SrcRC == &X86::VR128RegClass || SrcRC == &X86::VR128XRegClass);
}

bool X86InstructionSelector::selectTurnIntoCOPY(
    MachineInstr &I, MachineRegisterInfo &MRI, const unsigned DstReg,
    const TargetRegisterClass *DstRC, const unsigned SrcReg,
    const TargetRegisterClass *SrcRC) const {

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }
  I.setDesc(TII.get(X86::COPY));
  return true;
}

bool X86InstructionSelector::selectTruncOrPtrToInt(MachineInstr &I,
                                                   MachineRegisterInfo &MRI,
                                                   MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_TRUNC ||
          I.getOpcode() == TargetOpcode::G_PTRTOINT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  if (DstRB.getID() != SrcRB.getID()) {
    DEBUG(dbgs() << TII.getName(I.getOpcode())
                 << " input/output on different banks\n");
    return false;
  }

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  if (!DstRC || !SrcRC)
    return false;

  // If this is a truncation of a value that lives in a vector class and goes
  // into a floating-point class, just replace it with a copy, as we are able
  // to select it as a regular move.
  if (canTurnIntoCOPY(DstRC, SrcRC))
    return selectTurnIntoCOPY(I, MRI, DstReg, DstRC, SrcReg, SrcRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  unsigned SubIdx;
  if (DstRC == SrcRC) {
    // Nothing to be done
    SubIdx = X86::NoSubRegister;
  } else if (DstRC == &X86::GR32RegClass) {
    SubIdx = X86::sub_32bit;
  } else if (DstRC == &X86::GR16RegClass) {
    SubIdx = X86::sub_16bit;
  } else if (DstRC == &X86::GR8RegClass) {
    SubIdx = X86::sub_8bit;
  } else {
    return false;
  }

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << "\n");
    return false;
  }

  I.getOperand(1).setSubReg(SubIdx);

  I.setDesc(TII.get(X86::COPY));
  return true;
}
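
// Zext selection, sketched: pairs with a dedicated MOVZX opcode use it
// directly; i8/i16 => i64 go through a 32-bit MOVZX plus SUBREG_TO_REG,
// since a 32-bit def implicitly zeroes the upper 32 bits on x86-64; s1
// sources are instead masked with "AND reg, 1".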
|
|
|
|
|
bool X86InstructionSelector::selectZext(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  assert(!(SrcTy == LLT::scalar(8) && DstTy == LLT::scalar(32)) &&
         "8=>32 Zext is handled by tablegen");
  assert(!(SrcTy == LLT::scalar(16) && DstTy == LLT::scalar(32)) &&
         "16=>32 Zext is handled by tablegen");

  const static struct ZextEntry {
    LLT SrcTy;
    LLT DstTy;
    unsigned MovOp;
    bool NeedSubregToReg;
  } OpTable[] = {
      {LLT::scalar(8), LLT::scalar(16), X86::MOVZX16rr8, false},  // i8  => i16
      {LLT::scalar(8), LLT::scalar(64), X86::MOVZX32rr8, true},   // i8  => i64
      {LLT::scalar(16), LLT::scalar(64), X86::MOVZX32rr16, true}, // i16 => i64
      {LLT::scalar(32), LLT::scalar(64), 0, true}                 // i32 => i64
  };

  auto ZextEntryIt =
      std::find_if(std::begin(OpTable), std::end(OpTable),
                   [SrcTy, DstTy](const ZextEntry &El) {
                     return El.DstTy == DstTy && El.SrcTy == SrcTy;
                   });

  // Here we try to select Zext into a MOVZ and/or SUBREG_TO_REG instruction.
  if (ZextEntryIt != std::end(OpTable)) {
    const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
    const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
    const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
    const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

    if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
        !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }

    unsigned TransitRegTo = DstReg;
    unsigned TransitRegFrom = SrcReg;
    if (ZextEntryIt->MovOp) {
      // If we select Zext into MOVZ + SUBREG_TO_REG, we need to have
      // a transit register in between: create it here.
      if (ZextEntryIt->NeedSubregToReg) {
        TransitRegFrom = MRI.createVirtualRegister(
            getRegClass(LLT::scalar(32), DstReg, MRI));
        TransitRegTo = TransitRegFrom;
      }

      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(ZextEntryIt->MovOp))
          .addDef(TransitRegTo)
          .addReg(SrcReg);
    }
    if (ZextEntryIt->NeedSubregToReg) {
      BuildMI(*I.getParent(), I, I.getDebugLoc(),
              TII.get(TargetOpcode::SUBREG_TO_REG))
          .addDef(DstReg)
          .addImm(0)
          .addReg(TransitRegFrom)
          .addImm(X86::sub_32bit);
    }
    I.eraseFromParent();
    return true;
  }

  if (SrcTy != LLT::scalar(1))
    return false;

  unsigned AndOpc;
  if (DstTy == LLT::scalar(8))
    AndOpc = X86::AND8ri;
  else if (DstTy == LLT::scalar(16))
    AndOpc = X86::AND16ri8;
  else if (DstTy == LLT::scalar(32))
    AndOpc = X86::AND32ri8;
  else if (DstTy == LLT::scalar(64))
    AndOpc = X86::AND64ri8;
  else
    return false;

  unsigned DefReg = SrcReg;
  if (DstTy != LLT::scalar(8)) {
    DefReg = MRI.createVirtualRegister(getRegClass(DstTy, DstReg, MRI));
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(TargetOpcode::SUBREG_TO_REG), DefReg)
        .addImm(0)
        .addReg(SrcReg)
        .addImm(X86::sub_8bit);
  }

  MachineInstr &AndInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(AndOpc), DstReg)
           .addReg(DefReg)
           .addImm(1);

  constrainSelectedInstRegOperands(AndInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

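// Illustrative sketch of the OpTable-driven path above: a G_ZEXT from s8 to
// s64 becomes a MOVZX into a fresh 32-bit transit register followed by a
// SUBREG_TO_REG, roughly:
//   %t:gr32 = MOVZX32rr8 %src
//   %dst:gr64 = SUBREG_TO_REG 0, %t, %subreg.sub_32bit
// For s32 -> s64 the table stores MovOp == 0, so only the SUBREG_TO_REG is
// emitted.
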
bool X86InstructionSelector::selectAnyext(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
  const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);

  assert(DstRB.getID() == SrcRB.getID() &&
         "G_ANYEXT input/output on different banks\n");

  assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
         "G_ANYEXT incorrect operand size");

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);

  // If this is an ANY_EXT of a value that lives in a floating-point register
  // class and goes into a vector class, just replace it with a copy: we can
  // select it as a regular move.
  if (canTurnIntoCOPY(SrcRC, DstRC))
    return selectTurnIntoCOPY(I, MRI, SrcReg, SrcRC, DstReg, DstRC);

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }

  if (SrcRC == DstRC) {
    I.setDesc(TII.get(X86::COPY));
    return true;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(),
          TII.get(TargetOpcode::SUBREG_TO_REG))
      .addDef(DstReg)
      .addImm(0)
      .addReg(SrcReg)
      .addImm(getSubRegIndex(SrcRC));

  I.eraseFromParent();
  return true;
}

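// Illustrative sketch: when the GPR classes differ, the G_ANYEXT above turns
// into a SUBREG_TO_REG that drops the source into the matching subregister of
// a wider undefined register, roughly:
//   %dst:gr64 = SUBREG_TO_REG 0, %src, %subreg.sub_32bit
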
bool X86InstructionSelector::selectCmp(MachineInstr &I,
                                       MachineRegisterInfo &MRI,
                                       MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");

  X86::CondCode CC;
  bool SwapArgs;
  std::tie(CC, SwapArgs) = X86::getX86ConditionCode(
      (CmpInst::Predicate)I.getOperand(1).getPredicate());
  unsigned OpSet = X86::getSETFromCond(CC);

  unsigned LHS = I.getOperand(2).getReg();
  unsigned RHS = I.getOperand(3).getReg();

  if (SwapArgs)
    std::swap(LHS, RHS);

  unsigned OpCmp;
  LLT Ty = MRI.getType(LHS);

  switch (Ty.getSizeInBits()) {
  default:
    return false;
  case 8:
    OpCmp = X86::CMP8rr;
    break;
  case 16:
    OpCmp = X86::CMP16rr;
    break;
  case 32:
    OpCmp = X86::CMP32rr;
    break;
  case 64:
    OpCmp = X86::CMP64rr;
    break;
  }

  MachineInstr &CmpInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(OpCmp))
           .addReg(LHS)
           .addReg(RHS);

  MachineInstr &SetInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                   TII.get(OpSet), I.getOperand(0).getReg());

  constrainSelectedInstRegOperands(CmpInst, TII, TRI, RBI);
  constrainSelectedInstRegOperands(SetInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

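// Illustrative sketch: (G_ICMP intpred(eq), %a(s32), %b(s32)) is selected
// above as a CMP that defines EFLAGS plus the matching SETcc (e.g. SETEr for
// an equality compare, as chosen by X86::getSETFromCond), roughly:
//   CMP32rr %a, %b, implicit-def $eflags
//   %dst:gr8 = SETEr implicit $eflags
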
bool X86InstructionSelector::selectUadde(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned CarryOutReg = I.getOperand(1).getReg();
  const unsigned Op0Reg = I.getOperand(2).getReg();
  const unsigned Op1Reg = I.getOperand(3).getReg();
  unsigned CarryInReg = I.getOperand(4).getReg();

  const LLT DstTy = MRI.getType(DstReg);

  if (DstTy != LLT::scalar(32))
    return false;

  // Find the instruction that defines CarryIn, looking through G_TRUNCs.
  MachineInstr *Def = MRI.getVRegDef(CarryInReg);
  while (Def->getOpcode() == TargetOpcode::G_TRUNC) {
    CarryInReg = Def->getOperand(1).getReg();
    Def = MRI.getVRegDef(CarryInReg);
  }

  unsigned Opcode;
  if (Def->getOpcode() == TargetOpcode::G_UADDE) {
    // The carry was set by a previous G_UADDE: forward it through EFLAGS.
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS)
        .addReg(CarryInReg);

    if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI))
      return false;

    Opcode = X86::ADC32rr;
  } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) {
    // The carry-in is a constant; only 0 is supported.
    if (*val != 0)
      return false;

    Opcode = X86::ADD32rr;
  } else
    return false;

  MachineInstr &AddInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg)
           .addReg(Op1Reg);

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg)
      .addReg(X86::EFLAGS);

  if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) ||
      !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI))
    return false;

  I.eraseFromParent();
  return true;
}

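// Illustrative sketch: a G_UADDE with a constant-0 carry-in is selected above
// as a plain ADD, with the carry-out read back from EFLAGS, roughly:
//   %dst:gr32 = ADD32rr %op0, %op1, implicit-def $eflags
//   %carry_out = COPY $eflags
// A carry-in that chains from a previous G_UADDE uses ADC32rr instead.
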
bool X86InstructionSelector::selectExtract(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
         "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  int64_t Index = I.getOperand(2).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % DstTy.getSizeInBits() != 0)
    return false; // Not a subvector extract.

  if (Index == 0) {
    // Replace by an extract-subreg copy.
    if (!emitExtractSubreg(DstReg, SrcReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VEXTRACTF128rr));
    else
      return false;
  } else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
    if (DstTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VEXTRACTF32x4Zrr));
    else if (DstTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VEXTRACTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VEXTRACT immediate (a lane index, not a bit offset).
  Index = Index / DstTy.getSizeInBits();
  I.getOperand(2).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

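// Illustrative sketch: extracting the upper 128-bit half of a 256-bit vector
// (bit index 128) on plain AVX is selected above as a VEXTRACT whose
// immediate is the bit offset rescaled to a lane index, roughly:
//   %dst:vr128 = VEXTRACTF128rr %src:vr256, 1
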
bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
                                               MachineInstr &I,
                                               MachineRegisterInfo &MRI,
                                               MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() > DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (DstTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (DstTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);
  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);

  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubIdx);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain EXTRACT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), DstReg)
      .addReg(SrcReg, 0, SubIdx);

  return true;
}

bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
                                              MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg);
  unsigned SubIdx = X86::NoSubRegister;

  // TODO: support scalar types
  if (!DstTy.isVector() || !SrcTy.isVector())
    return false;

  assert(SrcTy.getSizeInBits() < DstTy.getSizeInBits() &&
         "Incorrect Src/Dst register size");

  if (SrcTy.getSizeInBits() == 128)
    SubIdx = X86::sub_xmm;
  else if (SrcTy.getSizeInBits() == 256)
    SubIdx = X86::sub_ymm;
  else
    return false;

  const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcReg, MRI);
  const TargetRegisterClass *DstRC = getRegClass(DstTy, DstReg, MRI);

  if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain INSERT_SUBREG\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY))
      .addReg(DstReg, RegState::DefineNoRead, SubIdx)
      .addReg(SrcReg);

  return true;
}

bool X86InstructionSelector::selectInsert(MachineInstr &I,
                                          MachineRegisterInfo &MRI,
                                          MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned SrcReg = I.getOperand(1).getReg();
  const unsigned InsertReg = I.getOperand(2).getReg();
  int64_t Index = I.getOperand(3).getImm();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT InsertRegTy = MRI.getType(InsertReg);

  // For now, handle vector types only.
  if (!DstTy.isVector())
    return false;

  if (Index % InsertRegTy.getSizeInBits() != 0)
    return false; // Not a subvector insert.

  if (Index == 0 && MRI.getVRegDef(SrcReg)->isImplicitDef()) {
    // Replace by a subreg copy.
    if (!emitInsertSubreg(DstReg, InsertReg, I, MRI, MF))
      return false;

    I.eraseFromParent();
    return true;
  }

  bool HasAVX = STI.hasAVX();
  bool HasAVX512 = STI.hasAVX512();
  bool HasVLX = STI.hasVLX();

  if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
    if (HasVLX)
      I.setDesc(TII.get(X86::VINSERTF32x4Z256rr));
    else if (HasAVX)
      I.setDesc(TII.get(X86::VINSERTF128rr));
    else
      return false;
  } else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
    if (InsertRegTy.getSizeInBits() == 128)
      I.setDesc(TII.get(X86::VINSERTF32x4Zrr));
    else if (InsertRegTy.getSizeInBits() == 256)
      I.setDesc(TII.get(X86::VINSERTF64x4Zrr));
    else
      return false;
  } else
    return false;

  // Convert to the X86 VINSERT immediate (a lane index, not a bit offset).
  Index = Index / InsertRegTy.getSizeInBits();

  I.getOperand(3).setImm(Index);

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

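// Illustrative sketch: inserting a 128-bit value into the upper half of a
// 256-bit vector (bit index 128) on plain AVX is selected above as a VINSERT
// whose immediate is the rescaled lane index, roughly:
//   %dst:vr256 = VINSERTF128rr %src:vr256, %ins:vr128, 1
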
bool X86InstructionSelector::selectUnmergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
         "unexpected instruction");

  // Split into a sequence of G_EXTRACTs.
  unsigned NumDefs = I.getNumOperands() - 1;
  unsigned SrcReg = I.getOperand(NumDefs).getReg();
  unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();

  for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
    MachineInstr &ExtrInst =
        *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
             .addReg(SrcReg)
             .addImm(Idx * DefSize);

    if (!select(ExtrInst, CoverageInfo))
      return false;
  }

  I.eraseFromParent();
  return true;
}

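// Illustrative sketch: unmerging a 256-bit value into two 128-bit halves is
// rewritten above as one G_EXTRACT per result, at bit offsets 0 and 128:
//   %lo = G_EXTRACT %src, 0
//   %hi = G_EXTRACT %src, 128
// and each G_EXTRACT is then selected recursively via select().
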
bool X86InstructionSelector::selectMergeValues(
    MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
    CodeGenCoverage &CoverageInfo) const {
  assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
         "unexpected instruction");

  // Split into a sequence of G_INSERTs.
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned SrcReg0 = I.getOperand(1).getReg();

  const LLT DstTy = MRI.getType(DstReg);
  const LLT SrcTy = MRI.getType(SrcReg0);
  unsigned SrcSize = SrcTy.getSizeInBits();

  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the first source, use emitInsertSubreg.
  unsigned DefReg = MRI.createGenericVirtualRegister(DstTy);
  MRI.setRegBank(DefReg, RegBank);
  if (!emitInsertSubreg(DefReg, I.getOperand(1).getReg(), I, MRI, MF))
    return false;

  for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
    unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
    MRI.setRegBank(Tmp, RegBank);

    MachineInstr &InsertInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                        TII.get(TargetOpcode::G_INSERT), Tmp)
                                    .addReg(DefReg)
                                    .addReg(I.getOperand(Idx).getReg())
                                    .addImm((Idx - 1) * SrcSize);

    DefReg = Tmp;

    if (!select(InsertInst, CoverageInfo))
      return false;
  }

  MachineInstr &CopyInst = *BuildMI(*I.getParent(), I, I.getDebugLoc(),
                                    TII.get(TargetOpcode::COPY), DstReg)
                               .addReg(DefReg);

  if (!select(CopyInst, CoverageInfo))
    return false;

  I.eraseFromParent();
  return true;
}

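// Illustrative sketch of the rewrite above, for a 2 x 128-bit -> 256-bit
// merge: the first piece is placed with a subregister copy (emitInsertSubreg),
// the rest through a chain of recursively selected G_INSERTs:
//   %t0 = (subregister copy of %src0)
//   %t1 = G_INSERT %t0, %src1, 128
//   %dst = COPY %t1
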
bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
                                              MachineRegisterInfo &MRI,
                                              MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");

  const unsigned CondReg = I.getOperand(0).getReg();
  MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();

  MachineInstr &TestInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
           .addReg(CondReg)
           .addImm(1);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JNE_1))
      .addMBB(DestMBB);

  constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);

  I.eraseFromParent();
  return true;
}

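// Illustrative sketch: the G_BRCOND above tests the low bit of the condition
// register and branches if it is set, roughly:
//   TEST8ri %cond, 1, implicit-def $eflags
//   JNE_1 %bb.dest, implicit $eflags
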
bool X86InstructionSelector::materializeFP(MachineInstr &I,
                                           MachineRegisterInfo &MRI,
                                           MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
         "unexpected instruction");

  // Can't handle alternate code models yet.
  CodeModel::Model CM = TM.getCodeModel();
  if (CM != CodeModel::Small && CM != CodeModel::Large)
    return false;

  const unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
  unsigned Align = DstTy.getSizeInBits();
  const DebugLoc &DbgLoc = I.getDebugLoc();

  unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);

  // Create the load from the constant pool.
  const ConstantFP *CFP = I.getOperand(1).getFPImm();
  unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
  MachineInstr *LoadInst = nullptr;
  unsigned char OpFlag = STI.classifyLocalReference(nullptr);

  if (CM == CodeModel::Large && STI.is64Bit()) {
    // Under the X86-64 non-small code model, GVs (and friends) are 64 bits,
    // so they cannot be folded into immediate fields.

    unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
    BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
        .addConstantPoolIndex(CPI, 0, OpFlag);

    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
        MF.getDataLayout().getPointerSize(), Align);

    LoadInst =
        addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
                     AddrReg)
            .addMemOperand(MMO);
  } else if (CM == CodeModel::Small || !STI.is64Bit()) {
    // Handle the case when globals fit in our immediate field.
    // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.

    // x86-32 PIC requires a PIC base register for constant pools.
    unsigned PICBase = 0;
    if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
      // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
      // In DAGISel, the code that initializes it is generated by the CGBR
      // pass.
      return false; // TODO: support this mode.
    } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
      PICBase = X86::RIP;

    LoadInst = addConstantPoolReference(
        BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
        OpFlag);
  } else
    return false;

  constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

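// Illustrative sketch: under the small code model on x86-64, the G_FCONSTANT
// above becomes a RIP-relative constant-pool load; the exact load opcode is
// whatever getLoadStoreOp returns for the type and bank, roughly:
//   %dst = <load opc> $rip, 1, $noreg, %const.0, $noreg
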
bool X86InstructionSelector::selectImplicitDefOrPHI(
    MachineInstr &I, MachineRegisterInfo &MRI) const {
  assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
          I.getOpcode() == TargetOpcode::G_PHI) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();

  if (!MRI.getRegClassOrNull(DstReg)) {
    const LLT DstTy = MRI.getType(DstReg);
    const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);

    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
      DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                   << " operand\n");
      return false;
    }
  }

  if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    I.setDesc(TII.get(X86::IMPLICIT_DEF));
  else
    I.setDesc(TII.get(X86::PHI));

  return true;
}

// Currently GlobalISel TableGen generates patterns for shift-by-immediate and
// shift-by-1, but with an i8 shift count. In G_LSHR/G_ASHR/G_SHL, as in
// LLVM IR, both arguments have the same type, so for now only i8 shifts can
// use the auto-generated TableGen patterns.
bool X86InstructionSelector::selectShift(MachineInstr &I,
                                         MachineRegisterInfo &MRI,
                                         MachineFunction &MF) const {
  assert((I.getOpcode() == TargetOpcode::G_SHL ||
          I.getOpcode() == TargetOpcode::G_ASHR ||
          I.getOpcode() == TargetOpcode::G_LSHR) &&
         "unexpected instruction");

  unsigned DstReg = I.getOperand(0).getReg();
  const LLT DstTy = MRI.getType(DstReg);
  const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);

  const static struct ShiftEntry {
    unsigned SizeInBits;
    unsigned CReg;
    unsigned OpLSHR;
    unsigned OpASHR;
    unsigned OpSHL;
  } OpTable[] = {
      {8, X86::CL, X86::SHR8rCL, X86::SAR8rCL, X86::SHL8rCL},      // i8
      {16, X86::CX, X86::SHR16rCL, X86::SAR16rCL, X86::SHL16rCL},  // i16
      {32, X86::ECX, X86::SHR32rCL, X86::SAR32rCL, X86::SHL32rCL}, // i32
      {64, X86::RCX, X86::SHR64rCL, X86::SAR64rCL, X86::SHL64rCL}  // i64
  };

  if (DstRB.getID() != X86::GPRRegBankID)
    return false;

  auto ShiftEntryIt = std::find_if(
      std::begin(OpTable), std::end(OpTable), [DstTy](const ShiftEntry &El) {
        return El.SizeInBits == DstTy.getSizeInBits();
      });
  if (ShiftEntryIt == std::end(OpTable))
    return false;

  unsigned CReg = ShiftEntryIt->CReg;
  unsigned Opcode = 0;
  switch (I.getOpcode()) {
  case TargetOpcode::G_SHL:
    Opcode = ShiftEntryIt->OpSHL;
    break;
  case TargetOpcode::G_ASHR:
    Opcode = ShiftEntryIt->OpASHR;
    break;
  case TargetOpcode::G_LSHR:
    Opcode = ShiftEntryIt->OpLSHR;
    break;
  default:
    return false;
  }

  unsigned Op0Reg = I.getOperand(1).getReg();
  unsigned Op1Reg = I.getOperand(2).getReg();

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
          ShiftEntryIt->CReg)
      .addReg(Op1Reg);

  // The shift instruction uses X86::CL. If we defined a super-register
  // of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
  if (CReg != X86::CL)
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::KILL),
            X86::CL)
        .addReg(CReg, RegState::Kill);

  MachineInstr &ShiftInst =
      *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg)
           .addReg(Op0Reg);

  constrainSelectedInstRegOperands(ShiftInst, TII, TRI, RBI);
  I.eraseFromParent();
  return true;
}

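// Illustrative sketch: a 32-bit G_SHL is selected above as a copy of the
// shift amount into ECX, a KILL narrowing it to CL, and the CL-based shift,
// roughly:
//   $ecx = COPY %amt
//   $cl = KILL $ecx
//   %dst:gr32 = SHL32rCL %src, implicit $cl
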
bool X86InstructionSelector::selectSDiv(MachineInstr &I,
                                        MachineRegisterInfo &MRI,
                                        MachineFunction &MF) const {
  assert(I.getOpcode() == TargetOpcode::G_SDIV && "unexpected instruction");

  const unsigned DstReg = I.getOperand(0).getReg();
  const unsigned DividentReg = I.getOperand(1).getReg();
  const unsigned DiviserReg = I.getOperand(2).getReg();

  const LLT RegTy = MRI.getType(DstReg);
  assert(RegTy == MRI.getType(DividentReg) &&
         RegTy == MRI.getType(DiviserReg) &&
         "Arguments and return value types must match");

  const RegisterBank &RegRB = *RBI.getRegBank(DstReg, MRI, TRI);

  // For the X86 IDIV instruction, in most cases the dividend
  // (numerator) must be in a specific register pair highreg:lowreg,
  // producing the quotient in lowreg and the remainder in highreg.
  // For most data types, to set up the instruction, the dividend is
  // copied into lowreg, and lowreg is sign-extended into highreg. The
  // exception is i8, where the dividend is defined as a single register rather
  // than a register pair, and we therefore directly sign-extend the dividend
  // into lowreg, instead of copying, and ignore the highreg.
  const static struct SDivEntry {
    unsigned SizeInBits;
    unsigned QuotientReg;
    unsigned DividentRegUpper;
    unsigned DividentRegLower;
    unsigned OpSignExtend;
    unsigned OpCopy;
    unsigned OpDiv;
  } OpTable[] = {
      {8, X86::AL, X86::NoRegister, X86::AX, 0, X86::MOVSX16rr8,
       X86::IDIV8r}, // i8
      {16, X86::AX, X86::DX, X86::AX, X86::CWD, TargetOpcode::COPY,
       X86::IDIV16r}, // i16
      {32, X86::EAX, X86::EDX, X86::EAX, X86::CDQ, TargetOpcode::COPY,
       X86::IDIV32r}, // i32
      {64, X86::RAX, X86::RDX, X86::RAX, X86::CQO, TargetOpcode::COPY,
       X86::IDIV64r} // i64
  };

  if (RegRB.getID() != X86::GPRRegBankID)
    return false;

  auto SDivEntryIt = std::find_if(
      std::begin(OpTable), std::end(OpTable), [RegTy](const SDivEntry &El) {
        return El.SizeInBits == RegTy.getSizeInBits();
      });

  if (SDivEntryIt == std::end(OpTable))
    return false;

  const TargetRegisterClass *RegRC = getRegClass(RegTy, RegRB);
  if (!RBI.constrainGenericRegister(DividentReg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DiviserReg, *RegRC, MRI) ||
      !RBI.constrainGenericRegister(DstReg, *RegRC, MRI)) {
    DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                 << " operand\n");
    return false;
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SDivEntryIt->OpCopy),
          SDivEntryIt->DividentRegLower)
      .addReg(DividentReg);
  if (SDivEntryIt->DividentRegUpper != X86::NoRegister)
    BuildMI(*I.getParent(), I, I.getDebugLoc(),
            TII.get(SDivEntryIt->OpSignExtend));
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SDivEntryIt->OpDiv))
      .addReg(DiviserReg);
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(TargetOpcode::COPY),
          DstReg)
      .addReg(SDivEntryIt->QuotientReg);

  I.eraseFromParent();
  return true;
}

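// Illustrative sketch: a 32-bit G_SDIV is selected above as the classic
// EAX/EDX sequence (per the i32 row of OpTable), roughly:
//   $eax = COPY %dividend
//   CDQ                        ; sign-extend EAX into EDX
//   IDIV32r %divisor           ; quotient in EAX, remainder in EDX
//   %dst:gr32 = COPY $eax
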
InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
                                   X86Subtarget &Subtarget,
                                   X86RegisterBankInfo &RBI) {
  return new X86InstructionSelector(TM, Subtarget, RBI);
}