//===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the X86-specific support for the FastISel class. Much
// of the target-specific code is generated by tablegen in the file
// X86GenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
|
2011-06-24 01:54:54 +08:00
|
|
|
#include "X86ISelLowering.h"
|
2012-12-04 00:50:05 +08:00
|
|
|
#include "X86InstrBuilder.h"
|
2008-09-03 09:04:47 +08:00
|
|
|
#include "X86RegisterInfo.h"
|
|
|
|
#include "X86Subtarget.h"
|
2008-08-22 08:20:26 +08:00
|
|
|
#include "X86TargetMachine.h"
|
2010-07-10 17:00:22 +08:00
|
|
|
#include "llvm/CodeGen/Analysis.h"
|
2008-09-03 08:03:49 +08:00
|
|
|
#include "llvm/CodeGen/FastISel.h"
|
2010-07-08 00:29:44 +08:00
|
|
|
#include "llvm/CodeGen/FunctionLoweringInfo.h"
|
2008-09-05 08:06:23 +08:00
|
|
|
#include "llvm/CodeGen/MachineConstantPool.h"
|
2008-09-07 17:09:33 +08:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2008-08-30 01:45:56 +08:00
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2013-01-02 19:36:10 +08:00
|
|
|
#include "llvm/IR/CallingConv.h"
|
|
|
|
#include "llvm/IR/DerivedTypes.h"
|
|
|
|
#include "llvm/IR/GlobalAlias.h"
|
|
|
|
#include "llvm/IR/GlobalVariable.h"
|
|
|
|
#include "llvm/IR/Instructions.h"
|
|
|
|
#include "llvm/IR/IntrinsicInst.h"
|
|
|
|
#include "llvm/IR/Operator.h"
|
2008-09-07 17:09:33 +08:00
|
|
|
#include "llvm/Support/CallSite.h"
|
2009-07-12 04:10:48 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2008-09-19 07:23:44 +08:00
|
|
|
#include "llvm/Support/GetElementPtrTypeIterator.h"
|
2010-01-27 08:00:57 +08:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2008-09-03 08:03:49 +08:00
|
|
|
using namespace llvm;
|
|
|
|
|
2009-03-09 02:44:31 +08:00
|
|
|
namespace {

class X86FastISel : public FastISel {
  /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const X86Subtarget *Subtarget;

  /// RegInfo - X86 register info.
  ///
  const X86RegisterInfo *RegInfo;

  /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
  /// floating point ops.
  /// When SSE is available, use it for f32 operations.
  /// When SSE2 is available, use it for f64 operations.
  bool X86ScalarSSEf64;
  bool X86ScalarSSEf32;

public:
  explicit X86FastISel(FunctionLoweringInfo &funcInfo,
                       const TargetLibraryInfo *libInfo)
    : FastISel(funcInfo, libInfo) {
    Subtarget = &TM.getSubtarget<X86Subtarget>();
    X86ScalarSSEf64 = Subtarget->hasSSE2();
    X86ScalarSSEf32 = Subtarget->hasSSE1();
    RegInfo = static_cast<const X86RegisterInfo*>(TM.getRegisterInfo());
  }

  virtual bool TargetSelectInstruction(const Instruction *I);

  /// TryToFoldLoad - The specified machine instr operand is a vreg, and that
  /// vreg is being provided by the specified load instruction. If possible,
  /// try to fold the load as an operand to the instruction, returning true if
  /// possible.
  virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
                             const LoadInst *LI);
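  // For example, at -O0 a reload from the stack that only feeds an add, e.g.
  //   movl 8(%rsp), %edx
  //   addl %edx, %esi
  // can be folded into its user as
  //   addl 8(%rsp), %esi  ## folded load
  // saving an instruction and a register.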

  virtual bool FastLowerArguments();

#include "X86GenFastISel.inc"

private:
  bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);

  bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);

  bool X86FastEmitStore(EVT VT, const Value *Val, const X86AddressMode &AM);
  bool X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM);

  bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
                         unsigned &ResultReg);

  bool X86SelectAddress(const Value *V, X86AddressMode &AM);
  bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);

  bool X86SelectLoad(const Instruction *I);

  bool X86SelectStore(const Instruction *I);

  bool X86SelectRet(const Instruction *I);

  bool X86SelectCmp(const Instruction *I);

  bool X86SelectZExt(const Instruction *I);

  bool X86SelectBranch(const Instruction *I);

  bool X86SelectShift(const Instruction *I);

  bool X86SelectDivRem(const Instruction *I);

  bool X86SelectSelect(const Instruction *I);

  bool X86SelectTrunc(const Instruction *I);

  bool X86SelectFPExt(const Instruction *I);
  bool X86SelectFPTrunc(const Instruction *I);

  bool X86VisitIntrinsicCall(const IntrinsicInst &I);
  bool X86SelectCall(const Instruction *I);

  bool DoSelectCall(const Instruction *I, const char *MemIntName);

  const X86InstrInfo *getInstrInfo() const {
    return getTargetMachine()->getInstrInfo();
  }
  const X86TargetMachine *getTargetMachine() const {
    return static_cast<const X86TargetMachine *>(&TM);
  }

  unsigned TargetMaterializeConstant(const Constant *C);

  unsigned TargetMaterializeAlloca(const AllocaInst *C);

  unsigned TargetMaterializeFloatZero(const ConstantFP *CF);

  /// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
  /// computed in an SSE register, not on the X87 floating point stack.
  bool isScalarFPTypeInSSEReg(EVT VT) const {
    return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
           (VT == MVT::f32 && X86ScalarSSEf32);   // f32 is when SSE1
  }

  bool isTypeLegal(Type *Ty, MVT &VT, bool AllowI1 = false);

  bool IsMemcpySmall(uint64_t Len);

  bool TryEmitSmallMemcpy(X86AddressMode DestAM,
                          X86AddressMode SrcAM, uint64_t Len);
};

} // end anonymous namespace.

bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
  EVT evt = TLI.getValueType(Ty, /*HandleUnknown=*/true);
  if (evt == MVT::Other || !evt.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  VT = evt.getSimpleVT();
  // For now, require SSE/SSE2 for performing floating-point operations,
  // since x87 requires additional work.
  if (VT == MVT::f64 && !X86ScalarSSEf64)
    return false;
  if (VT == MVT::f32 && !X86ScalarSSEf32)
    return false;
  // Similarly, no f80 support yet.
  if (VT == MVT::f80)
    return false;
  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}

#include "X86GenCallingConv.inc"

/// X86FastEmitLoad - Emit a machine instruction to load a value of type VT.
/// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
/// Return true and the result register by reference if it is possible.
bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
                                  unsigned &ResultReg) {
  // Get opcode and regclass of the output for the given load instruction.
  unsigned Opc = 0;
  const TargetRegisterClass *RC = NULL;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return false;
  case MVT::i1:
  case MVT::i8:
    Opc = X86::MOV8rm;
    RC  = &X86::GR8RegClass;
    break;
  case MVT::i16:
    Opc = X86::MOV16rm;
    RC  = &X86::GR16RegClass;
    break;
  case MVT::i32:
    Opc = X86::MOV32rm;
    RC  = &X86::GR32RegClass;
    break;
  case MVT::i64:
    // Must be in x86-64 mode.
    Opc = X86::MOV64rm;
    RC  = &X86::GR64RegClass;
    break;
  case MVT::f32:
    if (X86ScalarSSEf32) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
      RC  = &X86::FR32RegClass;
    } else {
      Opc = X86::LD_Fp32m;
      RC  = &X86::RFP32RegClass;
    }
    break;
  case MVT::f64:
    if (X86ScalarSSEf64) {
      Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
      RC  = &X86::FR64RegClass;
    } else {
      Opc = X86::LD_Fp64m;
      RC  = &X86::RFP64RegClass;
    }
    break;
  case MVT::f80:
    // No f80 support yet.
    return false;
  }

  ResultReg = createResultReg(RC);
  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                         DL, TII.get(Opc), ResultReg), AM);
  return true;
}

/// X86FastEmitStore - Emit a machine instruction to store a value Val of
/// type VT. The address is either pre-computed, consisting of a base ptr, Ptr,
/// and a displacement offset, or a GlobalAddress, i.e. V. Return true if it
/// is possible.
bool
X86FastISel::X86FastEmitStore(EVT VT, unsigned Val, const X86AddressMode &AM) {
  // Get opcode and regclass of the output for the given store instruction.
  unsigned Opc = 0;
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f80: // No f80 support yet.
  default: return false;
  case MVT::i1: {
    // Mask out all but lowest bit.
    unsigned AndResult = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(X86::AND8ri), AndResult).addReg(Val).addImm(1);
    Val = AndResult;
  }
  // FALLTHROUGH, handling i1 as i8.
  case MVT::i8:  Opc = X86::MOV8mr;  break;
  case MVT::i16: Opc = X86::MOV16mr; break;
  case MVT::i32: Opc = X86::MOV32mr; break;
  case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
  case MVT::f32:
    Opc = X86ScalarSSEf32 ?
          (Subtarget->hasAVX() ? X86::VMOVSSmr : X86::MOVSSmr) : X86::ST_Fp32m;
    break;
  case MVT::f64:
    Opc = X86ScalarSSEf64 ?
          (Subtarget->hasAVX() ? X86::VMOVSDmr : X86::MOVSDmr) : X86::ST_Fp64m;
    break;
  case MVT::v4f32:
    Opc = X86::MOVAPSmr;
    break;
  case MVT::v2f64:
    Opc = X86::MOVAPDmr;
    break;
  case MVT::v4i32:
  case MVT::v2i64:
  case MVT::v8i16:
  case MVT::v16i8:
    Opc = X86::MOVDQAmr;
    break;
  }

  addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                         DL, TII.get(Opc)), AM).addReg(Val);
  return true;
}

bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
                                   const X86AddressMode &AM) {
  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Val))
    Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));

  // If this is a store of a simple constant, fold the constant into the store.
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    unsigned Opc = 0;
    bool Signed = true;
    switch (VT.getSimpleVT().SimpleTy) {
    default: break;
    case MVT::i1:  Signed = false;     // FALLTHROUGH to handle as i8.
    case MVT::i8:  Opc = X86::MOV8mi;  break;
    case MVT::i16: Opc = X86::MOV16mi; break;
    case MVT::i32: Opc = X86::MOV32mi; break;
    case MVT::i64:
      // Must be a 32-bit sign extended value.
      if (isInt<32>(CI->getSExtValue()))
        Opc = X86::MOV64mi32;
      break;
    }

    if (Opc) {
      addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                             DL, TII.get(Opc)), AM)
        .addImm(Signed ? (uint64_t) CI->getSExtValue() :
                         CI->getZExtValue());
      return true;
    }
  }

  unsigned ValReg = getRegForValue(Val);
  if (ValReg == 0)
    return false;

  return X86FastEmitStore(VT, ValReg, AM);
}

/// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
/// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
/// ISD::SIGN_EXTEND).
bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
                                    unsigned Src, EVT SrcVT,
                                    unsigned &ResultReg) {
  unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
                           Src, /*TODO: Kill=*/false);
  if (RR == 0)
    return false;

  ResultReg = RR;
  return true;
}

/// X86SelectAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    // Don't walk into other basic blocks; it's possible we haven't
    // visited them yet, so the instructions may not yet be assigned
    // virtual registers.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(V)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(V->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectAddress(U->getOperand(0), AM);
    break;

  case Instruction::Alloca: {
    // Do static allocas.
    const AllocaInst *A = cast<AllocaInst>(V);
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(A);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = SI->second;
      return true;
    }
    break;
  }

  case Instruction::Add: {
    // Adds of constants are common and easy enough.
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint64_t Disp = (int32_t)AM.Disp + (uint64_t)CI->getSExtValue();
      // They have to fit in the 32-bit signed displacement field though.
      if (isInt<32>(Disp)) {
        AM.Disp = (uint32_t)Disp;
        return X86SelectAddress(U->getOperand(0), AM);
      }
    }
    break;
  }

  case Instruction::GetElementPtr: {
    X86AddressMode SavedAM = AM;

    // Pattern-match simple GEPs.
    uint64_t Disp = (int32_t)AM.Disp;
    unsigned IndexReg = AM.IndexReg;
    unsigned Scale = AM.Scale;
    gep_type_iterator GTI = gep_type_begin(U);
    // Iterate through the indices, folding what we can. Constants can be
    // folded, and one dynamic index can be handled, if the scale is supported.
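    // The folded result has to fit the x86 addressing form
    // Base + Scale*Index + Disp32, with Scale restricted to 1, 2, 4 or 8.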
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
         i != e; ++i, ++GTI) {
      const Value *Op = *i;
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD.getStructLayout(STy);
        Disp += SL->getElementOffset(cast<ConstantInt>(Op)->getZExtValue());
        continue;
      }

      // An array/variable index is always of the form i*S where S is the
      // constant scale size. See if we can push the scale into immediates.
      uint64_t S = TD.getTypeAllocSize(GTI.getIndexedType());
      for (;;) {
        if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
          // Constant-offset addressing.
          Disp += CI->getSExtValue() * S;
          break;
        }
        if (isa<AddOperator>(Op) &&
            (!isa<Instruction>(Op) ||
             FuncInfo.MBBMap[cast<Instruction>(Op)->getParent()]
               == FuncInfo.MBB) &&
            isa<ConstantInt>(cast<AddOperator>(Op)->getOperand(1))) {
          // An add (in the same block) with a constant operand. Fold the
          // constant.
          ConstantInt *CI =
            cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
          Disp += CI->getSExtValue() * S;
          // Iterate on the other operand.
          Op = cast<AddOperator>(Op)->getOperand(0);
          continue;
        }
        if (IndexReg == 0 &&
            (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
            (S == 1 || S == 2 || S == 4 || S == 8)) {
          // Scaled-index addressing.
          Scale = S;
          IndexReg = getRegForGEPIndex(Op).first;
          if (IndexReg == 0)
            return false;
          break;
        }
        // Unsupported.
        goto unsupported_gep;
      }
    }
    // Check for displacement overflow.
    if (!isInt<32>(Disp))
      break;
    // Ok, the GEP indices were covered by constant-offset and scaled-index
    // addressing. Update the address state and move on to examining the base.
    AM.IndexReg = IndexReg;
    AM.Scale = Scale;
    AM.Disp = (uint32_t)Disp;
    if (X86SelectAddress(U->getOperand(0), AM))
      return true;

    // If we couldn't merge the gep value into this addr mode, revert back to
    // our address and just match the value instead of completely failing.
    AM = SavedAM;
    break;
  unsupported_gep:
    // Ok, the GEP indices weren't all covered.
    break;
  }
  }

  // Handle constant address.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // Can't handle TLS yet.
    if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
      if (GVar->isThreadLocal())
        return false;

    // Can't handle TLS yet, part 2 (this is slightly crazy, but this is how
    // it works...).
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      if (const GlobalVariable *GVar =
            dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false)))
        if (GVar->isThreadLocal())
          return false;

    // RIP-relative addresses can't have additional register operands, so if
    // we've already folded stuff into the addressing mode, just force the
    // global value into its own register, which we can use as the basereg.
    if (!Subtarget->isPICStyleRIPRel() ||
        (AM.Base.Reg == 0 && AM.IndexReg == 0)) {
      // Okay, we've committed to selecting this global. Set up the address.
      AM.GV = GV;

      // Allow the subtarget to classify the global.
      unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);

      // If this reference is relative to the pic base, set it now.
      if (isGlobalRelativeToPICBase(GVFlags)) {
        // FIXME: How do we know Base.Reg is free??
        AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
      }

      // Unless the ABI requires an extra load, return a direct reference to
      // the global.
      if (!isGlobalStubReference(GVFlags)) {
        if (Subtarget->isPICStyleRIPRel()) {
          // Use rip-relative addressing if we can. Above we verified that the
          // base and index registers are unused.
          assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
          AM.Base.Reg = X86::RIP;
        }
        AM.GVOpFlags = GVFlags;
        return true;
      }

      // Ok, we need to do a load from a stub. If we've already loaded from
      // this stub, reuse the loaded pointer, otherwise emit the load now.
      DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
      unsigned LoadReg;
      if (I != LocalValueMap.end() && I->second != 0) {
        LoadReg = I->second;
      } else {
        // Issue load from stub.
        unsigned Opc = 0;
        const TargetRegisterClass *RC = NULL;
        X86AddressMode StubAM;
        StubAM.Base.Reg = AM.Base.Reg;
        StubAM.GV = GV;
        StubAM.GVOpFlags = GVFlags;

        // Prepare for inserting code in the local-value area.
        SavePoint SaveInsertPt = enterLocalValueArea();

        if (TLI.getPointerTy() == MVT::i64) {
          Opc = X86::MOV64rm;
          RC  = &X86::GR64RegClass;

          if (Subtarget->isPICStyleRIPRel())
            StubAM.Base.Reg = X86::RIP;
        } else {
          Opc = X86::MOV32rm;
          RC  = &X86::GR32RegClass;
        }

        LoadReg = createResultReg(RC);
        MachineInstrBuilder LoadMI =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
        addFullAddress(LoadMI, StubAM);

        // Ok, back to normal mode.
        leaveLocalValueArea(SaveInsertPt);

        // Prevent loading GV stub multiple times in same MBB.
        LocalValueMap[V] = LoadReg;
      }

      // Now construct the final address. Note that the Disp, Scale,
      // and Index values may already be set here.
      AM.Base.Reg = LoadReg;
      AM.GV = 0;
      return true;
    }
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectCallAddress - Attempt to fill in an address from the given value.
///
bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
  const User *U = NULL;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    Opcode = I->getOpcode();
    U = I;
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
    U = C;
  }

  switch (Opcode) {
  default: break;
  case Instruction::BitCast:
    // Look past bitcasts.
    return X86SelectCallAddress(U->getOperand(0), AM);

  case Instruction::IntToPtr:
    // Look past no-op inttoptrs.
    if (TLI.getValueType(U->getOperand(0)->getType()) == TLI.getPointerTy())
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;

  case Instruction::PtrToInt:
    // Look past no-op ptrtoints.
    if (TLI.getValueType(U->getType()) == TLI.getPointerTy())
      return X86SelectCallAddress(U->getOperand(0), AM);
    break;
  }

  // Handle constant address.
  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // Can't handle alternate code models yet.
    if (TM.getCodeModel() != CodeModel::Small)
      return false;

    // RIP-relative addresses can't have additional register operands.
    if (Subtarget->isPICStyleRIPRel() &&
        (AM.Base.Reg != 0 || AM.IndexReg != 0))
      return false;

    // Can't handle DLLImport.
    if (GV->hasDLLImportLinkage())
      return false;

    // Can't handle TLS.
    if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
      if (GVar->isThreadLocal())
        return false;

    // Okay, we've committed to selecting this global. Set up the basic address.
    AM.GV = GV;

    // No ABI requires an extra load for anything other than DLLImport, which
    // we rejected above. Return a direct reference to the global.
    if (Subtarget->isPICStyleRIPRel()) {
      // Use rip-relative addressing if we can. Above we verified that the
      // base and index registers are unused.
      assert(AM.Base.Reg == 0 && AM.IndexReg == 0);
      AM.Base.Reg = X86::RIP;
    } else if (Subtarget->isPICStyleStubPIC()) {
      AM.GVOpFlags = X86II::MO_PIC_BASE_OFFSET;
    } else if (Subtarget->isPICStyleGOT()) {
      AM.GVOpFlags = X86II::MO_GOTOFF;
    }

    return true;
  }

  // If all else fails, try to materialize the value in a register.
  if (!AM.GV || !Subtarget->isPICStyleRIPRel()) {
    if (AM.Base.Reg == 0) {
      AM.Base.Reg = getRegForValue(V);
      return AM.Base.Reg != 0;
    }
    if (AM.IndexReg == 0) {
      assert(AM.Scale == 1 && "Scale with no index!");
      AM.IndexReg = getRegForValue(V);
      return AM.IndexReg != 0;
    }
  }

  return false;
}

/// X86SelectStore - Select and emit code to implement store instructions.
bool X86FastISel::X86SelectStore(const Instruction *I) {
  // Atomic stores need special handling.
  const StoreInst *S = cast<StoreInst>(I);

  if (S->isAtomic())
    return false;

  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT, /*AllowI1=*/true))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(1), AM))
    return false;

  return X86FastEmitStore(VT, I->getOperand(0), AM);
}

/// X86SelectRet - Select and emit code to implement ret instructions.
bool X86FastISel::X86SelectRet(const Instruction *I) {
  const ReturnInst *Ret = cast<ReturnInst>(I);
  const Function &F = *I->getParent()->getParent();
  const X86MachineFunctionInfo *X86MFInfo =
      FuncInfo.MF->getInfo<X86MachineFunctionInfo>();

  if (!FuncInfo.CanLowerReturn)
    return false;

  CallingConv::ID CC = F.getCallingConv();
  if (CC != CallingConv::C &&
      CC != CallingConv::Fast &&
      CC != CallingConv::X86_FastCall)
    return false;

  if (Subtarget->isTargetWin64())
    return false;

  // Don't handle popping bytes on return for now.
  if (X86MFInfo->getBytesToPopOnReturn() != 0)
    return false;

  // fastcc with -tailcallopt is intended to provide a guaranteed
  // tail call optimization. Fastisel doesn't know how to do that.
  if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
    return false;

  // Let SDISel handle vararg functions.
  if (F.isVarArg())
    return false;

  // Build a list of return value registers.
  SmallVector<unsigned, 4> RetRegs;

  if (Ret->getNumOperands() > 0) {
    SmallVector<ISD::OutputArg, 4> Outs;
    GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI);

    // Analyze operands of the call, assigning locations to each operand.
    SmallVector<CCValAssign, 16> ValLocs;
    CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, TM, ValLocs,
                   I->getContext());
    CCInfo.AnalyzeReturn(Outs, RetCC_X86);

    const Value *RV = Ret->getOperand(0);
    unsigned Reg = getRegForValue(RV);
    if (Reg == 0)
      return false;

    // Only handle a single return value for now.
    if (ValLocs.size() != 1)
      return false;

    CCValAssign &VA = ValLocs[0];

    // Don't bother handling odd stuff for now.
    if (VA.getLocInfo() != CCValAssign::Full)
      return false;
    // Only handle register returns for now.
    if (!VA.isRegLoc())
      return false;

    // The calling-convention tables for x87 returns don't tell
    // the whole story.
    if (VA.getLocReg() == X86::ST0 || VA.getLocReg() == X86::ST1)
      return false;

    unsigned SrcReg = Reg + VA.getValNo();
    EVT SrcVT = TLI.getValueType(RV->getType());
    EVT DstVT = VA.getValVT();
    // Special handling for extended integers.
    if (SrcVT != DstVT) {
      if (SrcVT != MVT::i1 && SrcVT != MVT::i8 && SrcVT != MVT::i16)
        return false;

      if (!Outs[0].Flags.isZExt() && !Outs[0].Flags.isSExt())
        return false;

      assert(DstVT == MVT::i32 && "X86 should always ext to i32");

      if (SrcVT == MVT::i1) {
        if (Outs[0].Flags.isSExt())
          return false;
        SrcReg = FastEmitZExtFromI1(MVT::i8, SrcReg, /*TODO: Kill=*/false);
        SrcVT = MVT::i8;
      }
      unsigned Op = Outs[0].Flags.isZExt() ? ISD::ZERO_EXTEND :
                                             ISD::SIGN_EXTEND;
      SrcReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Op,
                          SrcReg, /*TODO: Kill=*/false);
    }

    // Make the copy.
    unsigned DstReg = VA.getLocReg();
    const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
    // Avoid a cross-class copy. This is very unlikely.
    if (!SrcRC->contains(DstReg))
      return false;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            DstReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());
  }

  // The x86-64 ABI for returning structs by value requires that we copy
  // the sret argument into %rax for the return. We saved the argument into
  // a virtual register in the entry block, so now we copy the value out
  // and into %rax. We also do the same with %eax for Win32.
  if (F.hasStructRetAttr() &&
      (Subtarget->is64Bit() || Subtarget->isTargetWindows())) {
    unsigned Reg = X86MFInfo->getSRetReturnReg();
    assert(Reg &&
           "SRetReturnReg should have been set in LowerFormalArguments()!");
    unsigned RetReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
            RetReg).addReg(Reg);
    RetRegs.push_back(RetReg);
  }

  // Now emit the RET.
  MachineInstrBuilder MIB =
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::RET));
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);
  return true;
}

/// X86SelectLoad - Select and emit code to implement load instructions.
///
bool X86FastISel::X86SelectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())
    return false;

  MVT VT;
  if (!isTypeLegal(I->getType(), VT, /*AllowI1=*/true))
    return false;

  X86AddressMode AM;
  if (!X86SelectAddress(I->getOperand(0), AM))
    return false;

  unsigned ResultReg = 0;
  if (X86FastEmitLoad(VT, AM, ResultReg)) {
    UpdateValueMap(I, ResultReg);
    return true;
  }
  return false;
}

static unsigned X86ChooseCmpOpcode(EVT VT, const X86Subtarget *Subtarget) {
  bool HasAVX = Subtarget->hasAVX();
  bool X86ScalarSSEf32 = Subtarget->hasSSE1();
  bool X86ScalarSSEf64 = Subtarget->hasSSE2();

  switch (VT.getSimpleVT().SimpleTy) {
  default:       return 0;
  case MVT::i8:  return X86::CMP8rr;
  case MVT::i16: return X86::CMP16rr;
  case MVT::i32: return X86::CMP32rr;
  case MVT::i64: return X86::CMP64rr;
  case MVT::f32:
    return X86ScalarSSEf32 ? (HasAVX ? X86::VUCOMISSrr : X86::UCOMISSrr) : 0;
  case MVT::f64:
    return X86ScalarSSEf64 ? (HasAVX ? X86::VUCOMISDrr : X86::UCOMISDrr) : 0;
  }
}

/// X86ChooseCmpImmediateOpcode - If we have a comparison with RHS as the RHS
/// of the comparison, return an opcode that works for the compare (e.g.
/// CMP32ri) otherwise return 0.
static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
  switch (VT.getSimpleVT().SimpleTy) {
  // Otherwise, we can't fold the immediate into this comparison.
  default:       return 0;
  case MVT::i8:  return X86::CMP8ri;
  case MVT::i16: return X86::CMP16ri;
  case MVT::i32: return X86::CMP32ri;
  case MVT::i64:
    // 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
    // field.
    if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())
      return X86::CMP64ri32;
    return 0;
  }
}

bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
                                     EVT VT) {
  unsigned Op0Reg = getRegForValue(Op0);
  if (Op0Reg == 0) return false;

  // Handle 'null' like i32/i64 0.
  if (isa<ConstantPointerNull>(Op1))
    Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));

  // We have two options: compare with register or immediate. If the RHS of
  // the compare is an immediate that we can fold into this compare, use
  // CMPri, otherwise use CMPrr.
  if (const ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
    if (unsigned CompareImmOpc = X86ChooseCmpImmediateOpcode(VT, Op1C)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareImmOpc))
        .addReg(Op0Reg)
        .addImm(Op1C->getSExtValue());
      return true;
    }
  }

  unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
  if (CompareOpc == 0) return false;

  unsigned Op1Reg = getRegForValue(Op1);
  if (Op1Reg == 0) return false;
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
    .addReg(Op0Reg)
    .addReg(Op1Reg);

  return true;
}

bool X86FastISel::X86SelectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);

  MVT VT;
  if (!isTypeLegal(I->getOperand(0)->getType(), VT))
    return false;

  unsigned ResultReg = createResultReg(&X86::GR8RegClass);
  unsigned SetCCOpc;
  bool SwapArgs;  // false -> compare Op0, Op1.  true -> compare Op1, Op0.
  switch (CI->getPredicate()) {
  case CmpInst::FCMP_OEQ: {
    if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
      return false;

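    // A UCOMIS "unordered" result sets ZF, PF and CF, while "equal" sets only
    // ZF, so "ordered and equal" means ZF set with PF clear: AND the SETE and
    // SETNP results together.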
    unsigned EReg = createResultReg(&X86::GR8RegClass);
    unsigned NPReg = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(X86::SETNPr), NPReg);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
            TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
    UpdateValueMap(I, ResultReg);
    return true;
  }
  case CmpInst::FCMP_UNE: {
    if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
      return false;

    unsigned NEReg = createResultReg(&X86::GR8RegClass);
    unsigned PReg = createResultReg(&X86::GR8RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETNEr), NEReg);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETPr), PReg);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::OR8rr),ResultReg)
      .addReg(PReg).addReg(NEReg);
    UpdateValueMap(I, ResultReg);
    return true;
  }
  case CmpInst::FCMP_OGT: SwapArgs = false; SetCCOpc = X86::SETAr;  break;
  case CmpInst::FCMP_OGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
  case CmpInst::FCMP_OLT: SwapArgs = true;  SetCCOpc = X86::SETAr;  break;
  case CmpInst::FCMP_OLE: SwapArgs = true;  SetCCOpc = X86::SETAEr; break;
  case CmpInst::FCMP_ONE: SwapArgs = false; SetCCOpc = X86::SETNEr; break;
  case CmpInst::FCMP_ORD: SwapArgs = false; SetCCOpc = X86::SETNPr; break;
  case CmpInst::FCMP_UNO: SwapArgs = false; SetCCOpc = X86::SETPr;  break;
  case CmpInst::FCMP_UEQ: SwapArgs = false; SetCCOpc = X86::SETEr;  break;
  case CmpInst::FCMP_UGT: SwapArgs = true;  SetCCOpc = X86::SETBr;  break;
  case CmpInst::FCMP_UGE: SwapArgs = true;  SetCCOpc = X86::SETBEr; break;
  case CmpInst::FCMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr;  break;
  case CmpInst::FCMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;

  case CmpInst::ICMP_EQ:  SwapArgs = false; SetCCOpc = X86::SETEr;  break;
  case CmpInst::ICMP_NE:  SwapArgs = false; SetCCOpc = X86::SETNEr; break;
  case CmpInst::ICMP_UGT: SwapArgs = false; SetCCOpc = X86::SETAr;  break;
  case CmpInst::ICMP_UGE: SwapArgs = false; SetCCOpc = X86::SETAEr; break;
  case CmpInst::ICMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr;  break;
  case CmpInst::ICMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;
  case CmpInst::ICMP_SGT: SwapArgs = false; SetCCOpc = X86::SETGr;  break;
  case CmpInst::ICMP_SGE: SwapArgs = false; SetCCOpc = X86::SETGEr; break;
  case CmpInst::ICMP_SLT: SwapArgs = false; SetCCOpc = X86::SETLr;  break;
  case CmpInst::ICMP_SLE: SwapArgs = false; SetCCOpc = X86::SETLEr; break;
  default:
    return false;
  }

  const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
  if (SwapArgs)
    std::swap(Op0, Op1);

  // Emit a compare of Op0/Op1.
  if (!X86FastEmitCompare(Op0, Op1, VT))
    return false;

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
  UpdateValueMap(I, ResultReg);
  return true;
}

bool X86FastISel::X86SelectZExt(const Instruction *I) {
  // Handle zero-extension from i1 to i8, which is common.
  if (!I->getOperand(0)->getType()->isIntegerTy(1))
    return false;

  EVT DstVT = TLI.getValueType(I->getType());
  if (!TLI.isTypeLegal(DstVT))
    return false;

  unsigned ResultReg = getRegForValue(I->getOperand(0));
  if (ResultReg == 0)
    return false;

  // Set the high bits to zero.
  ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg, /*TODO: Kill=*/false);
  if (ResultReg == 0)
    return false;

  if (DstVT != MVT::i8) {
    ResultReg = FastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
                           ResultReg, /*Kill=*/true);
    if (ResultReg == 0)
      return false;
  }

  UpdateValueMap(I, ResultReg);
  return true;
}


bool X86FastISel::X86SelectBranch(const Instruction *I) {
  // Unconditional branches are selected by tablegen-generated code.
  // Handle a conditional branch.
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // Fold the common case of a conditional branch with a comparison
  // in the same block (values defined on other blocks may not have
  // initialized registers).
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    if (CI->hasOneUse() && CI->getParent() == I->getParent()) {
      EVT VT = TLI.getValueType(CI->getOperand(0)->getType());

      // Try to take advantage of fallthrough opportunities.
      CmpInst::Predicate Predicate = CI->getPredicate();
      if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
        std::swap(TrueMBB, FalseMBB);
        Predicate = CmpInst::getInversePredicate(Predicate);
      }

      bool SwapArgs;  // false -> compare Op0, Op1.  true -> compare Op1, Op0.
      unsigned BranchOpc; // Opcode to jump on, e.g. "X86::JA"

      switch (Predicate) {
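      // OEQ needs two conditions (ZF set and PF clear), which a single Jcc
      // cannot test, so branch on the inverse: treat it as UNE with the
      // targets swapped and emit JNE plus JP below.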
case CmpInst::FCMP_OEQ:
|
|
|
|
std::swap(TrueMBB, FalseMBB);
|
|
|
|
Predicate = CmpInst::FCMP_UNE;
|
|
|
|
// FALL THROUGH
|
2010-02-12 03:25:55 +08:00
|
|
|
case CmpInst::FCMP_UNE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
|
|
|
|
case CmpInst::FCMP_OGT: SwapArgs = false; BranchOpc = X86::JA_4; break;
|
|
|
|
case CmpInst::FCMP_OGE: SwapArgs = false; BranchOpc = X86::JAE_4; break;
|
|
|
|
case CmpInst::FCMP_OLT: SwapArgs = true; BranchOpc = X86::JA_4; break;
|
|
|
|
case CmpInst::FCMP_OLE: SwapArgs = true; BranchOpc = X86::JAE_4; break;
|
|
|
|
case CmpInst::FCMP_ONE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
|
|
|
|
case CmpInst::FCMP_ORD: SwapArgs = false; BranchOpc = X86::JNP_4; break;
|
|
|
|
case CmpInst::FCMP_UNO: SwapArgs = false; BranchOpc = X86::JP_4; break;
|
|
|
|
case CmpInst::FCMP_UEQ: SwapArgs = false; BranchOpc = X86::JE_4; break;
|
|
|
|
case CmpInst::FCMP_UGT: SwapArgs = true; BranchOpc = X86::JB_4; break;
|
|
|
|
case CmpInst::FCMP_UGE: SwapArgs = true; BranchOpc = X86::JBE_4; break;
|
|
|
|
case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break;
|
|
|
|
case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2010-02-12 03:25:55 +08:00
|
|
|
case CmpInst::ICMP_EQ: SwapArgs = false; BranchOpc = X86::JE_4; break;
|
|
|
|
case CmpInst::ICMP_NE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
|
|
|
|
case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA_4; break;
|
|
|
|
case CmpInst::ICMP_UGE: SwapArgs = false; BranchOpc = X86::JAE_4; break;
|
|
|
|
case CmpInst::ICMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break;
|
|
|
|
case CmpInst::ICMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break;
|
|
|
|
case CmpInst::ICMP_SGT: SwapArgs = false; BranchOpc = X86::JG_4; break;
|
|
|
|
case CmpInst::ICMP_SGE: SwapArgs = false; BranchOpc = X86::JGE_4; break;
|
|
|
|
case CmpInst::ICMP_SLT: SwapArgs = false; BranchOpc = X86::JL_4; break;
|
|
|
|
case CmpInst::ICMP_SLE: SwapArgs = false; BranchOpc = X86::JLE_4; break;
|
2008-10-03 06:15:21 +08:00
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
|
2008-10-15 11:58:05 +08:00
|
|
|
if (SwapArgs)
|
2008-10-15 12:02:26 +08:00
|
|
|
std::swap(Op0, Op1);
|
|
|
|
|
2008-10-15 12:26:38 +08:00
|
|
|
// Emit a compare of the LHS and RHS, setting the flags.
|
|
|
|
if (!X86FastEmitCompare(Op0, Op1, VT))
|
|
|
|
return false;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
|
|
|
|
.addMBB(TrueMBB);
|
2008-10-22 02:24:51 +08:00
|
|
|
|
|
|
|
if (Predicate == CmpInst::FCMP_UNE) {
|
|
|
|
// X86 requires a second branch to handle UNE (and OEQ,
|
|
|
|
// which is mapped to UNE above).
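// For illustration only (labels and registers are made up): an "fcmp oeq"
// branch becomes roughly
//   ucomiss %xmm1, %xmm0
//   jne .LBBfalse        // not equal
//   jp  .LBBfalse        // unordered (NaN operand)
//   jmp .LBBtrue
// The JNE emitted above plus the JP emitted here together cover UNE.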
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JP_4))
|
|
|
|
.addMBB(TrueMBB);
|
2008-10-22 02:24:51 +08:00
|
|
|
}
|
|
|
|
|
2010-06-18 06:43:56 +08:00
|
|
|
FastEmitBranch(FalseMBB, DL);
|
2010-07-10 17:00:22 +08:00
|
|
|
FuncInfo.MBB->addSuccessor(TrueMBB);
|
2008-10-03 06:15:21 +08:00
|
|
|
return true;
|
|
|
|
}
|
2011-04-19 12:22:17 +08:00
|
|
|
} else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
|
|
|
|
// Handle things like "%cond = trunc i32 %X to i1 / br i1 %cond", which
|
|
|
|
// typically happen for _Bool and C++ bools.
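// For example (illustrative only), "br i1 %cond" where %cond is such a trunc
// is lowered to a "test $1, <reg>" on the untruncated value followed by a
// JNE/JE to the appropriate successor, chosen below so the layout successor
// can fall through.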
|
|
|
|
MVT SourceVT;
|
|
|
|
if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
|
|
|
|
isTypeLegal(TI->getOperand(0)->getType(), SourceVT)) {
|
|
|
|
unsigned TestOpc = 0;
|
|
|
|
switch (SourceVT.SimpleTy) {
|
|
|
|
default: break;
|
|
|
|
case MVT::i8: TestOpc = X86::TEST8ri; break;
|
|
|
|
case MVT::i16: TestOpc = X86::TEST16ri; break;
|
|
|
|
case MVT::i32: TestOpc = X86::TEST32ri; break;
|
|
|
|
case MVT::i64: TestOpc = X86::TEST64ri32; break;
|
|
|
|
}
|
|
|
|
if (TestOpc) {
|
|
|
|
unsigned OpReg = getRegForValue(TI->getOperand(0));
|
|
|
|
if (OpReg == 0) return false;
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TestOpc))
|
|
|
|
.addReg(OpReg).addImm(1);
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2011-04-19 12:26:32 +08:00
|
|
|
unsigned JmpOpc = X86::JNE_4;
|
|
|
|
if (FuncInfo.MBB->isLayoutSuccessor(TrueMBB)) {
|
|
|
|
std::swap(TrueMBB, FalseMBB);
|
|
|
|
JmpOpc = X86::JE_4;
|
|
|
|
}
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2011-04-19 12:26:32 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(JmpOpc))
|
2011-04-19 12:22:17 +08:00
|
|
|
.addMBB(TrueMBB);
|
|
|
|
FastEmitBranch(FalseMBB, DL);
|
|
|
|
FuncInfo.MBB->addSuccessor(TrueMBB);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
2008-10-03 06:15:21 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise do a clumsy setcc and re-test it.
|
2011-04-27 09:34:27 +08:00
|
|
|
// Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
|
|
|
|
// in an explicit cast, so make sure to handle that correctly.
|
2008-10-03 06:15:21 +08:00
|
|
|
unsigned OpReg = getRegForValue(BI->getCondition());
|
|
|
|
if (OpReg == 0) return false;
|
|
|
|
|
2011-04-27 09:34:27 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8ri))
|
|
|
|
.addReg(OpReg).addImm(1);
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::JNE_4))
|
|
|
|
.addMBB(TrueMBB);
|
2010-06-18 06:43:56 +08:00
|
|
|
FastEmitBranch(FalseMBB, DL);
|
2010-07-10 17:00:22 +08:00
|
|
|
FuncInfo.MBB->addSuccessor(TrueMBB);
|
2008-09-05 09:06:14 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
bool X86FastISel::X86SelectShift(const Instruction *I) {
|
2011-04-18 04:23:29 +08:00
|
|
|
unsigned CReg = 0, OpReg = 0;
|
2008-09-06 02:30:08 +08:00
|
|
|
const TargetRegisterClass *RC = NULL;
|
2010-02-16 00:12:20 +08:00
|
|
|
if (I->getType()->isIntegerTy(8)) {
|
2008-09-06 02:30:08 +08:00
|
|
|
CReg = X86::CL;
|
|
|
|
RC = &X86::GR8RegClass;
|
|
|
|
switch (I->getOpcode()) {
|
2011-04-18 04:23:29 +08:00
|
|
|
case Instruction::LShr: OpReg = X86::SHR8rCL; break;
|
|
|
|
case Instruction::AShr: OpReg = X86::SAR8rCL; break;
|
|
|
|
case Instruction::Shl: OpReg = X86::SHL8rCL; break;
|
2008-09-06 02:30:08 +08:00
|
|
|
default: return false;
|
|
|
|
}
|
2010-02-16 00:12:20 +08:00
|
|
|
} else if (I->getType()->isIntegerTy(16)) {
|
2008-09-06 02:30:08 +08:00
|
|
|
CReg = X86::CX;
|
|
|
|
RC = &X86::GR16RegClass;
|
|
|
|
switch (I->getOpcode()) {
|
2011-04-18 04:23:29 +08:00
|
|
|
case Instruction::LShr: OpReg = X86::SHR16rCL; break;
|
|
|
|
case Instruction::AShr: OpReg = X86::SAR16rCL; break;
|
|
|
|
case Instruction::Shl: OpReg = X86::SHL16rCL; break;
|
2008-09-06 02:30:08 +08:00
|
|
|
default: return false;
|
|
|
|
}
|
2010-02-16 00:12:20 +08:00
|
|
|
} else if (I->getType()->isIntegerTy(32)) {
|
2008-09-06 02:30:08 +08:00
|
|
|
CReg = X86::ECX;
|
|
|
|
RC = &X86::GR32RegClass;
|
|
|
|
switch (I->getOpcode()) {
|
2011-04-18 04:23:29 +08:00
|
|
|
case Instruction::LShr: OpReg = X86::SHR32rCL; break;
|
|
|
|
case Instruction::AShr: OpReg = X86::SAR32rCL; break;
|
|
|
|
case Instruction::Shl: OpReg = X86::SHL32rCL; break;
|
2008-09-06 02:30:08 +08:00
|
|
|
default: return false;
|
|
|
|
}
|
2010-02-16 00:12:20 +08:00
|
|
|
} else if (I->getType()->isIntegerTy(64)) {
|
2008-09-06 02:30:08 +08:00
|
|
|
CReg = X86::RCX;
|
|
|
|
RC = &X86::GR64RegClass;
|
|
|
|
switch (I->getOpcode()) {
|
2011-04-18 04:23:29 +08:00
|
|
|
case Instruction::LShr: OpReg = X86::SHR64rCL; break;
|
|
|
|
case Instruction::AShr: OpReg = X86::SAR64rCL; break;
|
|
|
|
case Instruction::Shl: OpReg = X86::SHL64rCL; break;
|
2008-09-06 02:30:08 +08:00
|
|
|
default: return false;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-11-03 19:35:31 +08:00
|
|
|
MVT VT;
|
|
|
|
if (!isTypeLegal(I->getType(), VT))
|
2008-09-06 05:27:34 +08:00
|
|
|
return false;
|
|
|
|
|
2008-09-06 02:30:08 +08:00
|
|
|
unsigned Op0Reg = getRegForValue(I->getOperand(0));
|
|
|
|
if (Op0Reg == 0) return false;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2008-09-06 02:30:08 +08:00
|
|
|
unsigned Op1Reg = getRegForValue(I->getOperand(1));
|
|
|
|
if (Op1Reg == 0) return false;
|
2010-07-11 11:31:00 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
|
|
|
|
CReg).addReg(Op1Reg);
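// Illustrative example (registers are arbitrary): a 32-bit shl by a variable
// amount becomes roughly
//   movl %<amt>, %ecx
//   shll %cl, %<val>
// because the variable-count shift forms only take their count in CL.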
|
2008-10-08 05:50:36 +08:00
|
|
|
|
|
|
|
// The shift instruction uses X86::CL. If we defined a super-register
|
2010-07-09 00:40:22 +08:00
|
|
|
// of X86::CL, emit a subreg KILL to precisely describe what we're doing here.
|
2008-10-08 05:50:36 +08:00
|
|
|
if (CReg != X86::CL)
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(TargetOpcode::KILL), X86::CL)
|
2010-07-09 00:40:22 +08:00
|
|
|
.addReg(CReg, RegState::Kill);
|
2008-10-08 05:50:36 +08:00
|
|
|
|
2008-09-06 02:30:08 +08:00
|
|
|
unsigned ResultReg = createResultReg(RC);
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpReg), ResultReg)
|
|
|
|
.addReg(Op0Reg);
|
2008-09-06 02:30:08 +08:00
|
|
|
UpdateValueMap(I, ResultReg);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-04-18 04:10:13 +08:00
|
|
|
bool X86FastISel::X86SelectDivRem(const Instruction *I) {
|
|
|
|
const static unsigned NumTypes = 4; // i8, i16, i32, i64
|
|
|
|
const static unsigned NumOps = 4; // SDiv, SRem, UDiv, URem
|
|
|
|
const static bool S = true; // IsSigned
|
|
|
|
const static bool U = false; // !IsSigned
|
|
|
|
const static unsigned Copy = TargetOpcode::COPY;
|
|
|
|
// For the X86 DIV/IDIV instruction, in most cases the dividend
|
|
|
|
// (numerator) must be in a specific register pair highreg:lowreg,
|
|
|
|
// producing the quotient in lowreg and the remainder in highreg.
|
|
|
|
// For most data types, to set up the instruction, the dividend is
|
|
|
|
// copied into lowreg, and lowreg is sign-extended or zero-extended
|
|
|
|
// into highreg. The exception is i8, where the dividend is defined
|
|
|
|
// as a single register rather than a register pair, and we
|
|
|
|
// therefore directly sign-extend or zero-extend the dividend into
|
|
|
|
// lowreg, instead of copying, and ignore the highreg.
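// Illustrative example (registers fixed by the ISA, values arbitrary):
// "%q = sdiv i32 %a, %b" is emitted as roughly
//   movl %<a>, %eax
//   cltd                 // sign-extend EAX into EDX
//   idivl %<b>           // quotient in EAX, remainder in EDX
// and "srem" simply copies EDX instead of EAX into the result register.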
|
|
|
|
const static struct DivRemEntry {
|
|
|
|
// The following portion depends only on the data type.
|
|
|
|
const TargetRegisterClass *RC;
|
|
|
|
unsigned LowInReg; // low part of the register pair
|
|
|
|
unsigned HighInReg; // high part of the register pair
|
|
|
|
// The following portion depends on both the data type and the operation.
|
|
|
|
struct DivRemResult {
|
|
|
|
unsigned OpDivRem; // The specific DIV/IDIV opcode to use.
|
|
|
|
unsigned OpSignExtend; // Opcode for sign-extending lowreg into
|
|
|
|
// highreg, or copying a zero into highreg.
|
|
|
|
unsigned OpCopy; // Opcode for copying dividend into lowreg, or
|
|
|
|
// zero/sign-extending into lowreg for i8.
|
|
|
|
unsigned DivRemResultReg; // Register containing the desired result.
|
|
|
|
bool IsOpSigned; // Whether to use signed or unsigned form.
|
|
|
|
} ResultTable[NumOps];
|
|
|
|
} OpTable[NumTypes] = {
|
|
|
|
{ &X86::GR8RegClass, X86::AX, 0, {
|
|
|
|
{ X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AL, S }, // SDiv
|
|
|
|
{ X86::IDIV8r, 0, X86::MOVSX16rr8, X86::AH, S }, // SRem
|
|
|
|
{ X86::DIV8r, 0, X86::MOVZX16rr8, X86::AL, U }, // UDiv
|
|
|
|
{ X86::DIV8r, 0, X86::MOVZX16rr8, X86::AH, U }, // URem
|
|
|
|
}
|
|
|
|
}, // i8
|
|
|
|
{ &X86::GR16RegClass, X86::AX, X86::DX, {
|
|
|
|
{ X86::IDIV16r, X86::CWD, Copy, X86::AX, S }, // SDiv
|
|
|
|
{ X86::IDIV16r, X86::CWD, Copy, X86::DX, S }, // SRem
|
|
|
|
{ X86::DIV16r, X86::MOV16r0, Copy, X86::AX, U }, // UDiv
|
|
|
|
{ X86::DIV16r, X86::MOV16r0, Copy, X86::DX, U }, // URem
|
|
|
|
}
|
|
|
|
}, // i16
|
|
|
|
{ &X86::GR32RegClass, X86::EAX, X86::EDX, {
|
|
|
|
{ X86::IDIV32r, X86::CDQ, Copy, X86::EAX, S }, // SDiv
|
|
|
|
{ X86::IDIV32r, X86::CDQ, Copy, X86::EDX, S }, // SRem
|
|
|
|
{ X86::DIV32r, X86::MOV32r0, Copy, X86::EAX, U }, // UDiv
|
|
|
|
{ X86::DIV32r, X86::MOV32r0, Copy, X86::EDX, U }, // URem
|
|
|
|
}
|
|
|
|
}, // i32
|
|
|
|
{ &X86::GR64RegClass, X86::RAX, X86::RDX, {
|
|
|
|
{ X86::IDIV64r, X86::CQO, Copy, X86::RAX, S }, // SDiv
|
|
|
|
{ X86::IDIV64r, X86::CQO, Copy, X86::RDX, S }, // SRem
|
|
|
|
{ X86::DIV64r, X86::MOV64r0, Copy, X86::RAX, U }, // UDiv
|
|
|
|
{ X86::DIV64r, X86::MOV64r0, Copy, X86::RDX, U }, // URem
|
|
|
|
}
|
|
|
|
}, // i64
|
|
|
|
};
|
|
|
|
|
|
|
|
MVT VT;
|
|
|
|
if (!isTypeLegal(I->getType(), VT))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned TypeIndex, OpIndex;
|
|
|
|
switch (VT.SimpleTy) {
|
|
|
|
default: return false;
|
|
|
|
case MVT::i8: TypeIndex = 0; break;
|
|
|
|
case MVT::i16: TypeIndex = 1; break;
|
|
|
|
case MVT::i32: TypeIndex = 2; break;
|
|
|
|
case MVT::i64: TypeIndex = 3;
|
|
|
|
if (!Subtarget->is64Bit())
|
|
|
|
return false;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (I->getOpcode()) {
|
|
|
|
default: llvm_unreachable("Unexpected div/rem opcode");
|
|
|
|
case Instruction::SDiv: OpIndex = 0; break;
|
|
|
|
case Instruction::SRem: OpIndex = 1; break;
|
|
|
|
case Instruction::UDiv: OpIndex = 2; break;
|
|
|
|
case Instruction::URem: OpIndex = 3; break;
|
|
|
|
}
|
|
|
|
|
|
|
|
const DivRemEntry &TypeEntry = OpTable[TypeIndex];
|
|
|
|
const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
|
|
|
|
unsigned Op0Reg = getRegForValue(I->getOperand(0));
|
|
|
|
if (Op0Reg == 0)
|
|
|
|
return false;
|
|
|
|
unsigned Op1Reg = getRegForValue(I->getOperand(1));
|
|
|
|
if (Op1Reg == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Move op0 into low-order input register.
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(OpEntry.OpCopy), TypeEntry.LowInReg).addReg(Op0Reg);
|
|
|
|
// Zero-extend or sign-extend into high-order input register.
|
|
|
|
if (OpEntry.OpSignExtend) {
|
|
|
|
if (OpEntry.IsOpSigned)
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(OpEntry.OpSignExtend));
|
|
|
|
else
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(OpEntry.OpSignExtend), TypeEntry.HighInReg);
|
|
|
|
}
|
|
|
|
// Generate the DIV/IDIV instruction.
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(OpEntry.OpDivRem)).addReg(Op1Reg);
|
|
|
|
// Copy output register into result register.
|
|
|
|
unsigned ResultReg = createResultReg(TypeEntry.RC);
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(Copy), ResultReg).addReg(OpEntry.DivRemResultReg);
|
|
|
|
UpdateValueMap(I, ResultReg);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
bool X86FastISel::X86SelectSelect(const Instruction *I) {
|
2010-11-03 19:35:31 +08:00
|
|
|
MVT VT;
|
|
|
|
if (!isTypeLegal(I->getType(), VT))
|
2008-10-15 13:07:36 +08:00
|
|
|
return false;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2010-09-30 07:00:29 +08:00
|
|
|
// We only use cmov here; if we don't have a cmov instruction, bail.
|
|
|
|
if (!Subtarget->hasCMov()) return false;
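// The select is lowered as "test the i1 condition against itself, then CMOVE
// from the false operand": the result starts out as the true operand and is
// conditionally overwritten when the condition byte is zero.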
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2008-09-06 02:30:08 +08:00
|
|
|
unsigned Opc = 0;
|
|
|
|
const TargetRegisterClass *RC = NULL;
|
2010-11-03 19:35:31 +08:00
|
|
|
if (VT == MVT::i16) {
|
2008-09-06 05:13:04 +08:00
|
|
|
Opc = X86::CMOVE16rr;
|
2008-09-06 02:30:08 +08:00
|
|
|
RC = &X86::GR16RegClass;
|
2010-11-03 19:35:31 +08:00
|
|
|
} else if (VT == MVT::i32) {
|
2008-09-06 05:13:04 +08:00
|
|
|
Opc = X86::CMOVE32rr;
|
2008-09-06 02:30:08 +08:00
|
|
|
RC = &X86::GR32RegClass;
|
2010-11-03 19:35:31 +08:00
|
|
|
} else if (VT == MVT::i64) {
|
2008-09-06 05:13:04 +08:00
|
|
|
Opc = X86::CMOVE64rr;
|
2008-09-06 02:30:08 +08:00
|
|
|
RC = &X86::GR64RegClass;
|
|
|
|
} else {
|
2010-11-23 11:31:01 +08:00
|
|
|
return false;
|
2008-09-06 02:30:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
unsigned Op0Reg = getRegForValue(I->getOperand(0));
|
|
|
|
if (Op0Reg == 0) return false;
|
|
|
|
unsigned Op1Reg = getRegForValue(I->getOperand(1));
|
|
|
|
if (Op1Reg == 0) return false;
|
|
|
|
unsigned Op2Reg = getRegForValue(I->getOperand(2));
|
|
|
|
if (Op2Reg == 0) return false;
|
|
|
|
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TEST8rr))
|
|
|
|
.addReg(Op0Reg).addReg(Op0Reg);
|
2008-09-06 02:30:08 +08:00
|
|
|
unsigned ResultReg = createResultReg(RC);
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg)
|
|
|
|
.addReg(Op1Reg).addReg(Op2Reg);
|
2008-09-06 02:30:08 +08:00
|
|
|
UpdateValueMap(I, ResultReg);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
bool X86FastISel::X86SelectFPExt(const Instruction *I) {
|
2008-10-15 13:07:36 +08:00
|
|
|
// fpext from float to double.
|
2011-09-03 08:46:42 +08:00
|
|
|
if (X86ScalarSSEf64 &&
|
2009-10-05 13:54:46 +08:00
|
|
|
I->getType()->isDoubleTy()) {
|
2010-04-15 09:51:59 +08:00
|
|
|
const Value *V = I->getOperand(0);
|
2009-10-05 13:54:46 +08:00
|
|
|
if (V->getType()->isFloatTy()) {
|
2008-10-15 13:07:36 +08:00
|
|
|
unsigned OpReg = getRegForValue(V);
|
|
|
|
if (OpReg == 0) return false;
|
2012-04-20 14:31:50 +08:00
|
|
|
unsigned ResultReg = createResultReg(&X86::FR64RegClass);
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(X86::CVTSS2SDrr), ResultReg)
|
|
|
|
.addReg(OpReg);
|
2008-10-15 13:07:36 +08:00
|
|
|
UpdateValueMap(I, ResultReg);
|
|
|
|
return true;
|
2008-09-11 05:02:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
bool X86FastISel::X86SelectFPTrunc(const Instruction *I) {
|
2011-09-03 08:46:42 +08:00
|
|
|
if (X86ScalarSSEf64) {
|
2009-10-05 13:54:46 +08:00
|
|
|
if (I->getType()->isFloatTy()) {
|
2010-04-15 09:51:59 +08:00
|
|
|
const Value *V = I->getOperand(0);
|
2009-10-05 13:54:46 +08:00
|
|
|
if (V->getType()->isDoubleTy()) {
|
2008-09-11 05:02:08 +08:00
|
|
|
unsigned OpReg = getRegForValue(V);
|
|
|
|
if (OpReg == 0) return false;
|
2012-04-20 14:31:50 +08:00
|
|
|
unsigned ResultReg = createResultReg(&X86::FR32RegClass);
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(X86::CVTSD2SSrr), ResultReg)
|
|
|
|
.addReg(OpReg);
|
2008-09-11 05:02:08 +08:00
|
|
|
UpdateValueMap(I, ResultReg);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
bool X86FastISel::X86SelectTrunc(const Instruction *I) {
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
|
|
|
|
EVT DstVT = TLI.getValueType(I->getType());
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2011-05-26 07:49:02 +08:00
|
|
|
// This code only handles truncation to byte.
|
2009-08-12 04:47:22 +08:00
|
|
|
if (DstVT != MVT::i8 && DstVT != MVT::i1)
|
2008-09-07 16:47:42 +08:00
|
|
|
return false;
|
2011-05-26 07:49:02 +08:00
|
|
|
if (!TLI.isTypeLegal(SrcVT))
|
2008-09-07 16:47:42 +08:00
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned InputReg = getRegForValue(I->getOperand(0));
|
|
|
|
if (!InputReg)
|
|
|
|
// Unhandled operand. Halt "fast" selection and bail.
|
|
|
|
return false;
|
|
|
|
|
2011-05-26 07:49:02 +08:00
|
|
|
if (SrcVT == MVT::i8) {
|
|
|
|
// Truncate from i8 to i1; no code needed.
|
|
|
|
UpdateValueMap(I, InputReg);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!Subtarget->is64Bit()) {
|
|
|
|
// If we're on x86-32, we can't extract an i8 from a general register.
|
|
|
|
// First issue a copy to GR16_ABCD or GR32_ABCD.
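// (Only EAX/EBX/ECX/EDX have an addressable low 8-bit subregister on x86-32,
// so the value is first constrained to one of those registers before the
// sub_8bit extract below.)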
|
2012-04-20 14:31:50 +08:00
|
|
|
const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16) ?
|
|
|
|
(const TargetRegisterClass*)&X86::GR16_ABCDRegClass :
|
|
|
|
(const TargetRegisterClass*)&X86::GR32_ABCDRegClass;
|
2011-05-26 07:49:02 +08:00
|
|
|
unsigned CopyReg = createResultReg(CopyRC);
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
|
|
|
|
CopyReg).addReg(InputReg);
|
|
|
|
InputReg = CopyReg;
|
|
|
|
}
|
2008-09-07 16:47:42 +08:00
|
|
|
|
2011-05-26 07:49:02 +08:00
|
|
|
// Issue an extract_subreg.
|
2009-08-12 04:47:22 +08:00
|
|
|
unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
|
2011-05-26 07:49:02 +08:00
|
|
|
InputReg, /*Kill=*/true,
|
2010-05-24 22:48:17 +08:00
|
|
|
X86::sub_8bit);
|
2008-09-07 16:47:42 +08:00
|
|
|
if (!ResultReg)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
UpdateValueMap(I, ResultReg);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2011-05-21 06:21:04 +08:00
|
|
|
bool X86FastISel::IsMemcpySmall(uint64_t Len) {
|
|
|
|
return Len <= (Subtarget->is64Bit() ? 32 : 16);
|
|
|
|
}
|
|
|
|
|
2011-04-27 09:45:07 +08:00
|
|
|
bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
|
|
|
|
X86AddressMode SrcAM, uint64_t Len) {
|
2011-05-21 06:21:04 +08:00
|
|
|
|
2011-04-27 09:45:07 +08:00
|
|
|
// Make sure we don't bloat code by inlining very large memcpy's.
|
2011-05-21 06:21:04 +08:00
|
|
|
if (!IsMemcpySmall(Len))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
bool i64Legal = Subtarget->is64Bit();
|
2011-04-27 09:45:07 +08:00
|
|
|
|
|
|
|
// We don't care about alignment here since we just emit integer accesses.
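// For example (sizes only, no particular registers assumed): a 13-byte copy
// on x86-64 is emitted as one i64, one i32 and one i8 load/store pair; on
// x86-32 the same copy uses three i32 pairs plus an i8 pair.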
|
|
|
|
while (Len) {
|
|
|
|
MVT VT;
|
|
|
|
if (Len >= 8 && i64Legal)
|
|
|
|
VT = MVT::i64;
|
|
|
|
else if (Len >= 4)
|
|
|
|
VT = MVT::i32;
|
|
|
|
else if (Len >= 2)
|
|
|
|
VT = MVT::i16;
|
|
|
|
else {
|
|
|
|
VT = MVT::i8;
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned Reg;
|
|
|
|
bool RV = X86FastEmitLoad(VT, SrcAM, Reg);
|
|
|
|
RV &= X86FastEmitStore(VT, Reg, DestAM);
|
|
|
|
assert(RV && "Failed to emit load or store??");
|
|
|
|
|
|
|
|
unsigned Size = VT.getSizeInBits()/8;
|
|
|
|
Len -= Size;
|
|
|
|
DestAM.Disp += Size;
|
|
|
|
SrcAM.Disp += Size;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
|
2008-12-09 10:42:50 +08:00
|
|
|
// FIXME: Handle more intrinsics.
|
2009-04-12 15:36:01 +08:00
|
|
|
switch (I.getIntrinsicID()) {
|
2008-12-09 10:42:50 +08:00
|
|
|
default: return false;
|
2011-04-19 13:52:03 +08:00
|
|
|
case Intrinsic::memcpy: {
|
|
|
|
const MemCpyInst &MCI = cast<MemCpyInst>(I);
|
|
|
|
// Don't handle volatile or variable length memcpys.
|
2011-06-11 07:39:36 +08:00
|
|
|
if (MCI.isVolatile())
|
2011-04-19 13:52:03 +08:00
|
|
|
return false;
|
2011-04-27 09:45:07 +08:00
|
|
|
|
2011-06-11 07:39:36 +08:00
|
|
|
if (isa<ConstantInt>(MCI.getLength())) {
|
|
|
|
// Small memcpy's are common enough that we want to do them
|
|
|
|
// without a call if possible.
|
|
|
|
uint64_t Len = cast<ConstantInt>(MCI.getLength())->getZExtValue();
|
|
|
|
if (IsMemcpySmall(Len)) {
|
|
|
|
X86AddressMode DestAM, SrcAM;
|
|
|
|
if (!X86SelectAddress(MCI.getRawDest(), DestAM) ||
|
|
|
|
!X86SelectAddress(MCI.getRawSource(), SrcAM))
|
|
|
|
return false;
|
|
|
|
TryEmitSmallMemcpy(DestAM, SrcAM, Len);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
|
|
|
|
if (!MCI.getLength()->getType()->isIntegerTy(SizeWidth))
|
|
|
|
return false;
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2011-06-11 07:39:36 +08:00
|
|
|
if (MCI.getSourceAddressSpace() > 255 || MCI.getDestAddressSpace() > 255)
|
2011-04-19 13:52:03 +08:00
|
|
|
return false;
|
2011-04-27 09:45:07 +08:00
|
|
|
|
2011-06-11 07:39:36 +08:00
|
|
|
return DoSelectCall(&I, "memcpy");
|
2011-04-19 13:52:03 +08:00
|
|
|
}
|
2011-06-11 07:39:36 +08:00
|
|
|
case Intrinsic::memset: {
|
|
|
|
const MemSetInst &MSI = cast<MemSetInst>(I);
|
|
|
|
|
2011-08-02 08:40:16 +08:00
|
|
|
if (MSI.isVolatile())
|
|
|
|
return false;
|
|
|
|
|
2011-06-11 07:39:36 +08:00
|
|
|
unsigned SizeWidth = Subtarget->is64Bit() ? 64 : 32;
|
|
|
|
if (!MSI.getLength()->getType()->isIntegerTy(SizeWidth))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (MSI.getDestAddressSpace() > 255)
|
|
|
|
return false;
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2011-06-11 07:39:36 +08:00
|
|
|
return DoSelectCall(&I, "memset");
|
|
|
|
}
|
2010-03-19 04:27:26 +08:00
|
|
|
case Intrinsic::stackprotector: {
|
2012-05-12 03:43:29 +08:00
|
|
|
// Emit code to store the stack guard onto the stack.
|
2010-03-19 04:27:26 +08:00
|
|
|
EVT PtrTy = TLI.getPointerTy();
|
|
|
|
|
2010-06-26 19:51:52 +08:00
|
|
|
const Value *Op1 = I.getArgOperand(0); // The guard's value.
|
|
|
|
const AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
|
2010-03-19 04:27:26 +08:00
|
|
|
|
|
|
|
// Grab the frame index.
|
|
|
|
X86AddressMode AM;
|
|
|
|
if (!X86SelectAddress(Slot, AM)) return false;
|
2010-03-19 05:58:33 +08:00
|
|
|
if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
|
2010-03-19 04:27:26 +08:00
|
|
|
return true;
|
|
|
|
}
|
2010-01-26 08:09:58 +08:00
|
|
|
case Intrinsic::dbg_declare: {
|
2010-04-15 09:51:59 +08:00
|
|
|
const DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
|
2010-01-26 08:09:58 +08:00
|
|
|
X86AddressMode AM;
|
2010-01-30 05:21:28 +08:00
|
|
|
assert(DI->getAddress() && "Null address should be checked earlier!");
|
2010-01-26 08:09:58 +08:00
|
|
|
if (!X86SelectAddress(DI->getAddress(), AM))
|
|
|
|
return false;
|
2011-06-29 03:10:37 +08:00
|
|
|
const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
|
2010-02-19 02:51:15 +08:00
|
|
|
// FIXME may need to add RegState::Debug to any registers produced,
|
|
|
|
// although ESP/EBP should be the only ones at the moment.
|
2010-07-10 17:00:22 +08:00
|
|
|
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II), AM).
|
|
|
|
addImm(0).addMetadata(DI->getVariable());
|
2010-01-26 08:09:58 +08:00
|
|
|
return true;
|
|
|
|
}
|
2010-01-19 06:11:29 +08:00
|
|
|
case Intrinsic::trap: {
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::TRAP));
|
2010-01-19 06:11:29 +08:00
|
|
|
return true;
|
|
|
|
}
|
2008-12-09 10:42:50 +08:00
|
|
|
case Intrinsic::sadd_with_overflow:
|
|
|
|
case Intrinsic::uadd_with_overflow: {
|
2011-04-19 13:52:03 +08:00
|
|
|
// FIXME: Should fold immediates.
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2008-12-09 15:55:31 +08:00
|
|
|
// Replace "add with overflow" intrinsics with an "add" instruction followed
|
2011-05-17 05:06:17 +08:00
|
|
|
// by a seto/setc instruction.
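// Illustrative example (registers arbitrary): for u/sadd.with.overflow.i32
// this emits roughly
//   addl %<b>, %<a>      // sum into the first result register
//   setb/seto %<flag>    // carry for unsigned, overflow for signed
// with the i1 overflow bit living in the register right after the sum.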
|
2008-12-09 10:42:50 +08:00
|
|
|
const Function *Callee = I.getCalledFunction();
|
2011-07-18 12:54:35 +08:00
|
|
|
Type *RetTy =
|
2008-12-09 10:42:50 +08:00
|
|
|
cast<StructType>(Callee->getReturnType())->getTypeAtIndex(unsigned(0));
|
|
|
|
|
2010-11-03 19:35:31 +08:00
|
|
|
MVT VT;
|
2008-12-09 10:42:50 +08:00
|
|
|
if (!isTypeLegal(RetTy, VT))
|
|
|
|
return false;
|
|
|
|
|
2010-06-26 19:51:52 +08:00
|
|
|
const Value *Op1 = I.getArgOperand(0);
|
|
|
|
const Value *Op2 = I.getArgOperand(1);
|
2008-12-09 10:42:50 +08:00
|
|
|
unsigned Reg1 = getRegForValue(Op1);
|
|
|
|
unsigned Reg2 = getRegForValue(Op2);
|
|
|
|
|
|
|
|
if (Reg1 == 0 || Reg2 == 0)
|
|
|
|
// FIXME: Handle values *not* in registers.
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned OpC = 0;
|
2009-08-12 04:47:22 +08:00
|
|
|
if (VT == MVT::i32)
|
2008-12-09 10:42:50 +08:00
|
|
|
OpC = X86::ADD32rr;
|
2009-08-12 04:47:22 +08:00
|
|
|
else if (VT == MVT::i64)
|
2008-12-09 10:42:50 +08:00
|
|
|
OpC = X86::ADD64rr;
|
|
|
|
else
|
|
|
|
return false;
|
|
|
|
|
2011-05-17 05:06:17 +08:00
|
|
|
// The call to CreateRegs builds two sequential registers, to store
|
2012-07-23 16:51:15 +08:00
|
|
|
// both of the returned values.
|
2011-05-17 05:06:17 +08:00
|
|
|
unsigned ResultReg = FuncInfo.CreateRegs(I.getType());
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg)
|
|
|
|
.addReg(Reg1).addReg(Reg2);
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2009-04-12 15:36:01 +08:00
|
|
|
unsigned Opc = X86::SETBr;
|
|
|
|
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
|
|
|
|
Opc = X86::SETOr;
|
2011-05-17 05:06:17 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg+1);
|
|
|
|
|
|
|
|
UpdateValueMap(&I, ResultReg, 2);
|
2008-12-09 10:42:50 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-02-26 05:59:35 +08:00
|
|
|
bool X86FastISel::FastLowerArguments() {
|
|
|
|
if (!FuncInfo.CanLowerReturn)
|
|
|
|
return false;
|
|
|
|
|
2013-04-03 00:31:41 +08:00
|
|
|
if (Subtarget->isTargetWin64())
|
2013-03-15 05:25:04 +08:00
|
|
|
return false;
|
|
|
|
|
2013-02-26 05:59:35 +08:00
|
|
|
const Function *F = FuncInfo.Fn;
|
|
|
|
if (F->isVarArg())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
CallingConv::ID CC = F->getCallingConv();
|
|
|
|
if (CC != CallingConv::C)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (!Subtarget->is64Bit())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Only handle simple cases. i.e. Up to 6 i32/i64 scalar arguments.
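// For example, "define i64 @f(i32 %a, i64 %b)" qualifies: with the
// non-Windows x86-64 convention handled here, %a arrives in EDI and %b in
// RSI, and each is simply copied into a virtual register below.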
|
|
|
|
unsigned Idx = 1;
|
|
|
|
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
|
|
|
|
I != E; ++I, ++Idx) {
|
|
|
|
if (Idx > 6)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (F->getAttributes().hasAttribute(Idx, Attribute::ByVal) ||
|
|
|
|
F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
|
|
|
|
F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
|
|
|
|
F->getAttributes().hasAttribute(Idx, Attribute::Nest))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
Type *ArgTy = I->getType();
|
|
|
|
if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
EVT ArgVT = TLI.getValueType(ArgTy);
|
2013-02-26 09:05:31 +08:00
|
|
|
if (!ArgVT.isSimple()) return false;
|
2013-02-26 05:59:35 +08:00
|
|
|
switch (ArgVT.getSimpleVT().SimpleTy) {
|
|
|
|
case MVT::i32:
|
|
|
|
case MVT::i64:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static const uint16_t GPR32ArgRegs[] = {
|
|
|
|
X86::EDI, X86::ESI, X86::EDX, X86::ECX, X86::R8D, X86::R9D
|
|
|
|
};
|
|
|
|
static const uint16_t GPR64ArgRegs[] = {
|
|
|
|
X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8 , X86::R9
|
|
|
|
};
|
|
|
|
|
|
|
|
Idx = 0;
|
|
|
|
const TargetRegisterClass *RC32 = TLI.getRegClassFor(MVT::i32);
|
|
|
|
const TargetRegisterClass *RC64 = TLI.getRegClassFor(MVT::i64);
|
|
|
|
for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
|
|
|
|
I != E; ++I, ++Idx) {
|
|
|
|
if (I->use_empty())
|
|
|
|
continue;
|
|
|
|
bool is32Bit = TLI.getValueType(I->getType()) == MVT::i32;
|
|
|
|
const TargetRegisterClass *RC = is32Bit ? RC32 : RC64;
|
|
|
|
unsigned SrcReg = is32Bit ? GPR32ArgRegs[Idx] : GPR64ArgRegs[Idx];
|
|
|
|
unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
|
|
|
|
// FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
|
|
|
|
// Without this, EmitLiveInCopies may eliminate the livein if its only
|
|
|
|
// use is a bitcast (which isn't turned into an instruction).
|
|
|
|
unsigned ResultReg = createResultReg(RC);
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
|
|
|
|
ResultReg).addReg(DstReg, getKillRegState(true));
|
|
|
|
UpdateValueMap(I, ResultReg);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|
|
|
const CallInst *CI = cast<CallInst>(I);
|
2010-06-26 19:51:52 +08:00
|
|
|
const Value *Callee = CI->getCalledValue();
|
2008-09-07 17:09:33 +08:00
|
|
|
|
|
|
|
// Can't handle inline asm yet.
|
|
|
|
if (isa<InlineAsm>(Callee))
|
|
|
|
return false;
|
|
|
|
|
2008-12-09 10:42:50 +08:00
|
|
|
// Handle intrinsic calls.
|
2010-04-15 09:51:59 +08:00
|
|
|
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI))
|
2009-04-12 15:36:01 +08:00
|
|
|
return X86VisitIntrinsicCall(*II);
|
2008-09-07 17:09:33 +08:00
|
|
|
|
2012-12-11 08:18:02 +08:00
|
|
|
// Allow SelectionDAG isel to handle tail calls.
|
|
|
|
if (cast<CallInst>(I)->isTailCall())
|
|
|
|
return false;
|
|
|
|
|
2011-06-11 07:39:36 +08:00
|
|
|
return DoSelectCall(I, 0);
|
|
|
|
}
|
|
|
|
|
2012-07-25 23:42:45 +08:00
|
|
|
static unsigned computeBytesPoppedByCallee(const X86Subtarget &Subtarget,
|
|
|
|
const ImmutableCallSite &CS) {
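// On 32-bit, non-Windows targets a callee that returns a struct through an
// sret pointer pops that hidden pointer itself ("ret $4"), so the caller must
// be told that 4 bytes have already been popped. All other cases handled
// below pop nothing.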
|
2012-07-25 21:35:45 +08:00
|
|
|
if (Subtarget.is64Bit())
|
|
|
|
return 0;
|
|
|
|
if (Subtarget.isTargetWindows())
|
|
|
|
return 0;
|
|
|
|
CallingConv::ID CC = CS.getCallingConv();
|
|
|
|
if (CC == CallingConv::Fast || CC == CallingConv::GHC)
|
|
|
|
return 0;
|
2012-12-19 15:18:57 +08:00
|
|
|
if (!CS.paramHasAttr(1, Attribute::StructRet))
|
2012-07-25 21:35:45 +08:00
|
|
|
return 0;
|
2012-12-19 15:18:57 +08:00
|
|
|
if (CS.paramHasAttr(1, Attribute::InReg))
|
2012-07-25 21:41:10 +08:00
|
|
|
return 0;
|
2012-07-25 21:35:45 +08:00
|
|
|
return 4;
|
|
|
|
}
|
|
|
|
|
2011-06-11 07:39:36 +08:00
|
|
|
// Select either a call, or an llvm.memcpy/memmove/memset intrinsic
|
|
|
|
bool X86FastISel::DoSelectCall(const Instruction *I, const char *MemIntName) {
|
|
|
|
const CallInst *CI = cast<CallInst>(I);
|
|
|
|
const Value *Callee = CI->getCalledValue();
|
|
|
|
|
2008-09-07 17:09:33 +08:00
|
|
|
// Handle only C and fastcc calling conventions for now.
|
2010-04-15 09:51:59 +08:00
|
|
|
ImmutableCallSite CS(CI);
|
2009-09-02 16:44:58 +08:00
|
|
|
CallingConv::ID CC = CS.getCallingConv();
|
2011-04-19 12:42:38 +08:00
|
|
|
if (CC != CallingConv::C && CC != CallingConv::Fast &&
|
2008-09-07 17:09:33 +08:00
|
|
|
CC != CallingConv::X86_FastCall)
|
|
|
|
return false;
|
|
|
|
|
2010-01-27 08:00:57 +08:00
|
|
|
// fastcc with -tailcallopt is intended to provide a guaranteed
|
|
|
|
// tail call optimization. Fastisel doesn't know how to do that.
|
2011-12-03 06:16:29 +08:00
|
|
|
if (CC == CallingConv::Fast && TM.Options.GuaranteedTailCallOpt)
|
2010-01-27 08:00:57 +08:00
|
|
|
return false;
|
|
|
|
|
2011-07-18 12:54:35 +08:00
|
|
|
PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
|
|
|
|
FunctionType *FTy = cast<FunctionType>(PT->getElementType());
|
2011-04-20 01:22:22 +08:00
|
|
|
bool isVarArg = FTy->isVarArg();
|
|
|
|
|
|
|
|
// Don't know how to handle Win64 varargs yet. Nothing special needed for
|
|
|
|
// x86-32. Special handling for x86-64 is implemented.
|
|
|
|
if (isVarArg && Subtarget->isTargetWin64())
|
2008-09-07 17:09:33 +08:00
|
|
|
return false;
|
|
|
|
|
2010-05-28 02:43:40 +08:00
|
|
|
// Fast-isel doesn't know about callee-pop yet.
|
2011-06-24 01:54:54 +08:00
|
|
|
if (X86::isCalleePop(CC, Subtarget->is64Bit(), isVarArg,
|
2011-12-03 06:16:29 +08:00
|
|
|
TM.Options.GuaranteedTailCallOpt))
|
2010-05-28 02:43:40 +08:00
|
|
|
return false;
|
|
|
|
|
2011-05-18 02:29:03 +08:00
|
|
|
// Check whether the function can return without sret-demotion.
|
|
|
|
SmallVector<ISD::OutputArg, 4> Outs;
|
2012-12-30 21:01:51 +08:00
|
|
|
GetReturnInfo(I->getType(), CS.getAttributes(), Outs, TLI);
|
2011-05-18 02:29:03 +08:00
|
|
|
bool CanLowerReturn = TLI.CanLowerReturn(CS.getCallingConv(),
|
2012-07-19 08:11:40 +08:00
|
|
|
*FuncInfo.MF, FTy->isVarArg(),
|
|
|
|
Outs, FTy->getContext());
|
2011-05-18 02:29:03 +08:00
|
|
|
if (!CanLowerReturn)
|
2011-05-17 10:36:59 +08:00
|
|
|
return false;
|
|
|
|
|
2008-09-18 05:18:49 +08:00
|
|
|
// Materialize callee address in a register. FIXME: GV address can be
|
|
|
|
// handled with a CALLpcrel32 instead.
|
2008-09-20 06:16:54 +08:00
|
|
|
X86AddressMode CalleeAM;
|
2009-07-10 13:33:42 +08:00
|
|
|
if (!X86SelectCallAddress(Callee, CalleeAM))
|
2008-09-20 06:16:54 +08:00
|
|
|
return false;
|
2008-09-18 05:18:49 +08:00
|
|
|
unsigned CalleeOp = 0;
|
2010-04-15 09:51:59 +08:00
|
|
|
const GlobalValue *GV = 0;
|
2009-06-27 12:50:14 +08:00
|
|
|
if (CalleeAM.GV != 0) {
|
2008-09-20 06:16:54 +08:00
|
|
|
GV = CalleeAM.GV;
|
2009-06-27 12:50:14 +08:00
|
|
|
} else if (CalleeAM.Base.Reg != 0) {
|
|
|
|
CalleeOp = CalleeAM.Base.Reg;
|
2008-09-20 06:16:54 +08:00
|
|
|
} else
|
|
|
|
return false;
|
2008-09-18 05:18:49 +08:00
|
|
|
|
2008-09-07 17:09:33 +08:00
|
|
|
// Deal with call operands first.
|
2010-04-15 09:51:59 +08:00
|
|
|
SmallVector<const Value *, 8> ArgVals;
|
2008-10-15 13:38:32 +08:00
|
|
|
SmallVector<unsigned, 8> Args;
|
2010-11-03 19:35:31 +08:00
|
|
|
SmallVector<MVT, 8> ArgVTs;
|
2008-10-15 13:38:32 +08:00
|
|
|
SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
|
2012-02-15 08:36:26 +08:00
|
|
|
unsigned arg_size = CS.arg_size();
|
|
|
|
Args.reserve(arg_size);
|
|
|
|
ArgVals.reserve(arg_size);
|
|
|
|
ArgVTs.reserve(arg_size);
|
|
|
|
ArgFlags.reserve(arg_size);
|
2010-04-15 09:51:59 +08:00
|
|
|
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
|
2008-09-07 17:09:33 +08:00
|
|
|
i != e; ++i) {
|
2011-06-11 07:39:36 +08:00
|
|
|
// If we're lowering a mem intrinsic instead of a regular call, skip the
|
|
|
|
// last two arguments, which should not be passed to the underlying functions.
|
|
|
|
if (MemIntName && e-i <= 2)
|
|
|
|
break;
|
2011-04-19 12:42:38 +08:00
|
|
|
Value *ArgVal = *i;
|
2008-09-07 17:09:33 +08:00
|
|
|
ISD::ArgFlagsTy Flags;
|
|
|
|
unsigned AttrInd = i - CS.arg_begin() + 1;
|
2012-12-19 15:18:57 +08:00
|
|
|
if (CS.paramHasAttr(AttrInd, Attribute::SExt))
|
2008-09-07 17:09:33 +08:00
|
|
|
Flags.setSExt();
|
2012-12-19 15:18:57 +08:00
|
|
|
if (CS.paramHasAttr(AttrInd, Attribute::ZExt))
|
2008-09-07 17:09:33 +08:00
|
|
|
Flags.setZExt();
|
|
|
|
|
2012-12-19 15:18:57 +08:00
|
|
|
if (CS.paramHasAttr(AttrInd, Attribute::ByVal)) {
|
2011-07-18 12:54:35 +08:00
|
|
|
PointerType *Ty = cast<PointerType>(ArgVal->getType());
|
|
|
|
Type *ElementTy = Ty->getElementType();
|
2011-05-21 06:21:04 +08:00
|
|
|
unsigned FrameSize = TD.getTypeAllocSize(ElementTy);
|
|
|
|
unsigned FrameAlign = CS.getParamAlignment(AttrInd);
|
|
|
|
if (!FrameAlign)
|
|
|
|
FrameAlign = TLI.getByValTypeAlignment(ElementTy);
|
|
|
|
Flags.setByVal();
|
|
|
|
Flags.setByValSize(FrameSize);
|
|
|
|
Flags.setByValAlign(FrameAlign);
|
|
|
|
if (!IsMemcpySmall(FrameSize))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2012-12-19 15:18:57 +08:00
|
|
|
if (CS.paramHasAttr(AttrInd, Attribute::InReg))
|
2011-05-21 06:21:04 +08:00
|
|
|
Flags.setInReg();
|
2012-12-19 15:18:57 +08:00
|
|
|
if (CS.paramHasAttr(AttrInd, Attribute::Nest))
|
2011-05-21 06:21:04 +08:00
|
|
|
Flags.setNest();
|
|
|
|
|
2011-04-19 12:42:38 +08:00
|
|
|
// If this is an i1/i8/i16 argument, promote to i32 to avoid an extra
|
|
|
|
// instruction. This is safe because it is common to all fastisel supported
|
|
|
|
// calling conventions on x86.
|
|
|
|
if (ConstantInt *CI = dyn_cast<ConstantInt>(ArgVal)) {
|
|
|
|
if (CI->getBitWidth() == 1 || CI->getBitWidth() == 8 ||
|
|
|
|
CI->getBitWidth() == 16) {
|
|
|
|
if (Flags.isSExt())
|
|
|
|
ArgVal = ConstantExpr::getSExt(CI,Type::getInt32Ty(CI->getContext()));
|
|
|
|
else
|
|
|
|
ArgVal = ConstantExpr::getZExt(CI,Type::getInt32Ty(CI->getContext()));
|
|
|
|
}
|
|
|
|
}
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2011-04-19 13:09:50 +08:00
|
|
|
unsigned ArgReg;
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2011-04-19 13:15:59 +08:00
|
|
|
// Passing bools around ends up doing a trunc to i1 and passing it.
|
|
|
|
// Codegen this as an argument + "and 1".
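// For example (illustrative): for "call void @g(i1 zeroext %c)" where
// "%c = trunc i32 %x to i1", the untruncated %x is passed and an
// "andl $1, %<reg>" is emitted so only the low bit is observable by the
// callee.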
|
2011-04-19 13:09:50 +08:00
|
|
|
if (ArgVal->getType()->isIntegerTy(1) && isa<TruncInst>(ArgVal) &&
|
|
|
|
cast<TruncInst>(ArgVal)->getParent() == I->getParent() &&
|
|
|
|
ArgVal->hasOneUse()) {
|
|
|
|
ArgVal = cast<TruncInst>(ArgVal)->getOperand(0);
|
|
|
|
ArgReg = getRegForValue(ArgVal);
|
|
|
|
if (ArgReg == 0) return false;
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2011-04-19 13:09:50 +08:00
|
|
|
MVT ArgVT;
|
|
|
|
if (!isTypeLegal(ArgVal->getType(), ArgVT)) return false;
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2011-04-19 13:09:50 +08:00
|
|
|
ArgReg = FastEmit_ri(ArgVT, ArgVT, ISD::AND, ArgReg,
|
|
|
|
ArgVal->hasOneUse(), 1);
|
|
|
|
} else {
|
|
|
|
ArgReg = getRegForValue(ArgVal);
|
|
|
|
}
|
2011-04-19 12:42:38 +08:00
|
|
|
|
2011-04-19 13:15:59 +08:00
|
|
|
if (ArgReg == 0) return false;
|
|
|
|
|
2011-07-18 12:54:35 +08:00
|
|
|
Type *ArgTy = ArgVal->getType();
|
2010-11-03 19:35:31 +08:00
|
|
|
MVT ArgVT;
|
2008-10-15 13:07:36 +08:00
|
|
|
if (!isTypeLegal(ArgTy, ArgVT))
|
2008-09-07 17:09:33 +08:00
|
|
|
return false;
|
2011-05-21 06:21:04 +08:00
|
|
|
if (ArgVT == MVT::x86mmx)
|
|
|
|
return false;
|
2008-09-07 17:09:33 +08:00
|
|
|
unsigned OriginalAlignment = TD.getABITypeAlignment(ArgTy);
|
|
|
|
Flags.setOrigAlign(OriginalAlignment);
|
|
|
|
|
2011-04-19 13:09:50 +08:00
|
|
|
Args.push_back(ArgReg);
|
2011-04-19 12:42:38 +08:00
|
|
|
ArgVals.push_back(ArgVal);
|
2008-09-07 17:09:33 +08:00
|
|
|
ArgVTs.push_back(ArgVT);
|
|
|
|
ArgFlags.push_back(Flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Analyze operands of the call, assigning locations to each operand.
|
|
|
|
SmallVector<CCValAssign, 16> ArgLocs;
|
2011-06-09 07:55:35 +08:00
|
|
|
CCState CCInfo(CC, isVarArg, *FuncInfo.MF, TM, ArgLocs,
|
2012-07-19 08:11:40 +08:00
|
|
|
I->getParent()->getContext());
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2010-06-02 05:09:47 +08:00
|
|
|
// Allocate shadow area for Win64
|
2011-04-19 12:42:38 +08:00
|
|
|
if (Subtarget->isTargetWin64())
|
2010-11-23 11:31:01 +08:00
|
|
|
CCInfo.AllocateStack(32, 8);
|
2010-06-02 05:09:47 +08:00
|
|
|
|
2010-10-31 21:21:44 +08:00
|
|
|
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);
|
2008-09-07 17:09:33 +08:00
|
|
|
|
|
|
|
// Get a count of how many bytes are to be pushed on the stack.
|
|
|
|
unsigned NumBytes = CCInfo.getNextStackOffset();
|
|
|
|
|
|
|
|
// Issue CALLSEQ_START
|
2011-06-29 05:14:33 +08:00
|
|
|
unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackDown))
|
|
|
|
.addImm(NumBytes);
|
2008-09-07 17:09:33 +08:00
|
|
|
|
2008-10-15 13:30:52 +08:00
|
|
|
// Process the arguments: walk the register/memloc assignments, inserting
|
2008-09-07 17:09:33 +08:00
|
|
|
// copies / loads.
|
|
|
|
SmallVector<unsigned, 4> RegArgs;
|
|
|
|
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
|
|
|
|
CCValAssign &VA = ArgLocs[i];
|
|
|
|
unsigned Arg = Args[VA.getValNo()];
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT ArgVT = ArgVTs[VA.getValNo()];
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2008-09-07 17:09:33 +08:00
|
|
|
// Promote the value if needed.
|
|
|
|
switch (VA.getLocInfo()) {
|
|
|
|
case CCValAssign::Full: break;
|
2008-09-08 14:35:17 +08:00
|
|
|
case CCValAssign::SExt: {
|
2011-05-21 06:21:04 +08:00
|
|
|
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
|
|
|
|
"Unexpected extend");
|
2008-09-08 14:35:17 +08:00
|
|
|
bool Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
|
|
|
|
Arg, ArgVT, Arg);
|
2011-01-06 06:26:52 +08:00
|
|
|
assert(Emitted && "Failed to emit a sext!"); (void)Emitted;
|
2008-09-08 14:35:17 +08:00
|
|
|
ArgVT = VA.getLocVT();
|
2008-09-07 17:09:33 +08:00
|
|
|
break;
|
2008-09-08 14:35:17 +08:00
|
|
|
}
|
|
|
|
case CCValAssign::ZExt: {
|
2011-05-21 06:21:04 +08:00
|
|
|
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
|
|
|
|
"Unexpected extend");
|
2008-09-08 14:35:17 +08:00
|
|
|
bool Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
|
|
|
|
Arg, ArgVT, Arg);
|
2011-01-06 06:26:52 +08:00
|
|
|
assert(Emitted && "Failed to emit a zext!"); (void)Emitted;
|
2008-09-08 14:35:17 +08:00
|
|
|
ArgVT = VA.getLocVT();
|
2008-09-07 17:09:33 +08:00
|
|
|
break;
|
2008-09-08 14:35:17 +08:00
|
|
|
}
|
|
|
|
case CCValAssign::AExt: {
|
2011-05-21 06:21:04 +08:00
|
|
|
assert(VA.getLocVT().isInteger() && !VA.getLocVT().isVector() &&
|
|
|
|
"Unexpected extend");
|
2008-09-08 14:35:17 +08:00
|
|
|
bool Emitted = X86FastEmitExtend(ISD::ANY_EXTEND, VA.getLocVT(),
|
|
|
|
Arg, ArgVT, Arg);
|
2008-09-11 10:41:37 +08:00
|
|
|
if (!Emitted)
|
|
|
|
Emitted = X86FastEmitExtend(ISD::ZERO_EXTEND, VA.getLocVT(),
|
2008-10-15 13:07:36 +08:00
|
|
|
Arg, ArgVT, Arg);
|
2008-09-11 10:41:37 +08:00
|
|
|
if (!Emitted)
|
|
|
|
Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
|
|
|
|
Arg, ArgVT, Arg);
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2011-01-06 06:26:52 +08:00
|
|
|
assert(Emitted && "Failed to emit a aext!"); (void)Emitted;
|
2008-09-08 14:35:17 +08:00
|
|
|
ArgVT = VA.getLocVT();
|
2008-09-07 17:09:33 +08:00
|
|
|
break;
|
|
|
|
}
|
2009-08-05 13:33:42 +08:00
|
|
|
case CCValAssign::BCvt: {
|
2010-11-03 19:35:31 +08:00
|
|
|
unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(),
|
2010-11-23 11:31:01 +08:00
|
|
|
ISD::BITCAST, Arg, /*TODO: Kill=*/false);
|
2009-08-05 13:33:42 +08:00
|
|
|
assert(BC != 0 && "Failed to emit a bitcast!");
|
|
|
|
Arg = BC;
|
|
|
|
ArgVT = VA.getLocVT();
|
|
|
|
break;
|
|
|
|
}
|
2012-07-12 03:58:38 +08:00
|
|
|
case CCValAssign::VExt:
|
|
|
|
// VExt has not been implemented, so this should be impossible to reach
|
|
|
|
// for now. However, fall back to SelectionDAG isel once it is implemented.
|
|
|
|
return false;
|
|
|
|
case CCValAssign::Indirect:
|
|
|
|
// FIXME: Indirect doesn't need extending, but fast-isel doesn't fully
|
|
|
|
// support this.
|
|
|
|
return false;
|
2008-09-08 14:35:17 +08:00
|
|
|
}
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2008-09-07 17:09:33 +08:00
|
|
|
if (VA.isRegLoc()) {
|
2010-07-11 11:31:00 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
|
|
|
|
VA.getLocReg()).addReg(Arg);
|
2008-09-07 17:09:33 +08:00
|
|
|
RegArgs.push_back(VA.getLocReg());
|
|
|
|
} else {
|
|
|
|
unsigned LocMemOffset = VA.getLocMemOffset();
|
2008-09-11 04:11:02 +08:00
|
|
|
X86AddressMode AM;
|
2012-11-01 11:47:50 +08:00
|
|
|
AM.Base.Reg = RegInfo->getStackRegister();
|
2008-09-11 04:11:02 +08:00
|
|
|
AM.Disp = LocMemOffset;
|
2010-04-15 09:51:59 +08:00
|
|
|
const Value *ArgVal = ArgVals[VA.getValNo()];
|
2011-05-21 06:21:04 +08:00
|
|
|
ISD::ArgFlagsTy Flags = ArgFlags[VA.getValNo()];
|
|
|
|
|
|
|
|
if (Flags.isByVal()) {
|
|
|
|
X86AddressMode SrcAM;
|
|
|
|
SrcAM.Base.Reg = Arg;
|
|
|
|
bool Res = TryEmitSmallMemcpy(AM, SrcAM, Flags.getByValSize());
|
|
|
|
assert(Res && "memcpy length already checked!"); (void)Res;
|
|
|
|
} else if (isa<ConstantInt>(ArgVal) || isa<ConstantPointerNull>(ArgVal)) {
|
|
|
|
// If this is a really simple value, emit this with the Value* version
|
2011-10-12 08:14:12 +08:00
|
|
|
// of X86FastEmitStore. If it isn't simple, we don't want to do this,
|
2011-05-21 06:21:04 +08:00
|
|
|
// as it can cause us to reevaluate the argument.
|
2011-10-19 06:11:33 +08:00
|
|
|
if (!X86FastEmitStore(ArgVT, ArgVal, AM))
|
|
|
|
return false;
|
2011-05-21 06:21:04 +08:00
|
|
|
} else {
|
2011-10-19 06:11:33 +08:00
|
|
|
if (!X86FastEmitStore(ArgVT, Arg, AM))
|
|
|
|
return false;
|
2011-05-21 06:21:04 +08:00
|
|
|
}
|
2008-09-07 17:09:33 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-09-25 23:24:26 +08:00
|
|
|
// ELF / PIC requires the GOT pointer in the EBX register before function
|
2010-11-23 11:31:01 +08:00
|
|
|
// calls made through the PLT.
|
2009-07-09 12:39:06 +08:00
|
|
|
if (Subtarget->isPICStyleGOT()) {
|
2010-07-08 00:29:44 +08:00
|
|
|
unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
|
2010-07-11 11:31:00 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
|
|
|
|
X86::EBX).addReg(Base);
|
2008-09-25 23:24:26 +08:00
|
|
|
}
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2011-04-20 01:22:22 +08:00
|
|
|
if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64()) {
|
|
|
|
// Count the number of XMM registers allocated.
|
2012-03-11 15:57:25 +08:00
|
|
|
static const uint16_t XMMArgRegs[] = {
|
2011-04-20 01:22:22 +08:00
|
|
|
X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
|
|
|
|
X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
|
|
|
|
};
|
|
|
|
unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::MOV8ri),
|
|
|
|
X86::AL).addImm(NumXMMRegs);
|
|
|
|
}
|
|
|
|
|
2008-09-07 17:09:33 +08:00
|
|
|
// Issue the call.
|
2009-07-09 14:34:26 +08:00
|
|
|
MachineInstrBuilder MIB;
|
|
|
|
if (CalleeOp) {
|
|
|
|
// Register-indirect call.
|
2010-07-22 08:09:39 +08:00
|
|
|
unsigned CallOpc;
|
2012-02-17 01:56:02 +08:00
|
|
|
if (Subtarget->is64Bit())
|
2010-07-22 08:09:39 +08:00
|
|
|
CallOpc = X86::CALL64r;
|
|
|
|
else
|
|
|
|
CallOpc = X86::CALL32r;
|
2010-07-10 17:00:22 +08:00
|
|
|
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
|
|
|
|
.addReg(CalleeOp);
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2009-07-09 14:34:26 +08:00
|
|
|
} else {
|
|
|
|
// Direct call.
|
|
|
|
assert(GV && "Not a direct call");
|
2010-07-22 08:09:39 +08:00
|
|
|
unsigned CallOpc;
|
2012-02-17 01:56:02 +08:00
|
|
|
if (Subtarget->is64Bit())
|
2010-07-22 08:09:39 +08:00
|
|
|
CallOpc = X86::CALL64pcrel32;
|
|
|
|
else
|
|
|
|
CallOpc = X86::CALLpcrel32;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2009-07-09 14:34:26 +08:00
|
|
|
// See if we need any target-specific flags on the GV operand.
|
|
|
|
unsigned char OpFlags = 0;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2009-07-09 14:34:26 +08:00
|
|
|
// On ELF targets, in both X86-64 and X86-32 mode, direct calls to
|
|
|
|
// external symbols must go through the PLT in PIC mode. If the symbol
|
|
|
|
// has hidden or protected visibility, or if it is static or local, then
|
|
|
|
// we don't need to use the PLT - we can directly call it.
|
|
|
|
if (Subtarget->isTargetELF() &&
|
|
|
|
TM.getRelocationModel() == Reloc::PIC_ &&
|
|
|
|
GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
|
|
|
|
OpFlags = X86II::MO_PLT;
|
2009-07-11 04:47:30 +08:00
|
|
|
} else if (Subtarget->isPICStyleStubAny() &&
|
2009-07-09 14:34:26 +08:00
|
|
|
(GV->isDeclaration() || GV->isWeakForLinker()) &&
|
2011-04-20 08:14:25 +08:00
|
|
|
(!Subtarget->getTargetTriple().isMacOSX() ||
|
|
|
|
Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
|
2009-07-09 14:34:26 +08:00
|
|
|
// PC-relative references to external symbols should go through $stub,
|
|
|
|
// unless we're building with the leopard linker or later, which
|
|
|
|
// automatically synthesizes these stubs.
|
|
|
|
OpFlags = X86II::MO_DARWIN_STUB;
|
|
|
|
}
|
2010-11-23 11:31:01 +08:00
|
|
|
|
|
|
|
|
2011-06-11 07:39:36 +08:00
|
|
|
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc));
|
|
|
|
if (MemIntName)
|
2011-06-11 09:55:07 +08:00
|
|
|
MIB.addExternalSymbol(MemIntName, OpFlags);
|
2011-06-11 07:39:36 +08:00
|
|
|
else
|
|
|
|
MIB.addGlobalAddress(GV, 0, OpFlags);
|
2009-07-09 14:34:26 +08:00
|
|
|
}
|
2008-09-25 23:24:26 +08:00
|
|
|
|
2012-07-05 07:53:27 +08:00
|
|
|
// Add a register mask with the call-preserved registers.
|
|
|
|
// Proper defs for return values will be added by setPhysRegsDeadExcept().
|
|
|
|
MIB.addRegMask(TRI.getCallPreservedMask(CS.getCallingConv()));
|
|
|
|
|
2008-09-25 23:24:26 +08:00
|
|
|
// Add an implicit use of the GOT pointer in EBX.
|
2009-07-09 12:39:06 +08:00
|
|
|
if (Subtarget->isPICStyleGOT())
|
2012-07-05 07:53:27 +08:00
|
|
|
MIB.addReg(X86::EBX, RegState::Implicit);
|
2008-09-25 23:24:26 +08:00
|
|
|
|
2011-04-20 01:22:22 +08:00
|
|
|
if (Subtarget->is64Bit() && isVarArg && !Subtarget->isTargetWin64())
|
2012-07-05 07:53:27 +08:00
|
|
|
MIB.addReg(X86::AL, RegState::Implicit);
|
2011-04-20 01:22:22 +08:00
|
|
|
|
2008-09-07 17:09:33 +08:00
|
|
|
// Add implicit physical register uses to the call.
|
2008-10-08 06:10:33 +08:00
|
|
|
for (unsigned i = 0, e = RegArgs.size(); i != e; ++i)
|
2012-07-05 07:53:27 +08:00
|
|
|
MIB.addReg(RegArgs[i], RegState::Implicit);
|
2012-02-16 08:02:50 +08:00
|
|
|
|
2008-09-07 17:09:33 +08:00
|
|
|
// Issue CALLSEQ_END
|
2011-06-29 05:14:33 +08:00
|
|
|
unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
|
2012-07-25 23:42:45 +08:00
|
|
|
const unsigned NumBytesCallee = computeBytesPoppedByCallee(*Subtarget, CS);
|
2010-07-10 17:00:22 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(AdjStackUp))
|
2011-04-29 04:19:12 +08:00
|
|
|
.addImm(NumBytes).addImm(NumBytesCallee);
|
2008-09-07 17:09:33 +08:00
|
|
|
|
2011-05-18 02:29:03 +08:00
|
|
|
// Build info for return calling conv lowering code.
|
|
|
|
// FIXME: This is practically a copy-paste from TargetLowering::LowerCallTo.
|
|
|
|
SmallVector<ISD::InputArg, 32> Ins;
|
|
|
|
SmallVector<EVT, 4> RetTys;
|
|
|
|
ComputeValueVTs(TLI, I->getType(), RetTys);
|
|
|
|
for (unsigned i = 0, e = RetTys.size(); i != e; ++i) {
|
|
|
|
EVT VT = RetTys[i];
|
2012-12-19 19:48:16 +08:00
|
|
|
MVT RegisterVT = TLI.getRegisterType(I->getParent()->getContext(), VT);
|
2011-05-18 02:29:03 +08:00
|
|
|
unsigned NumRegs = TLI.getNumRegisters(I->getParent()->getContext(), VT);
|
|
|
|
for (unsigned j = 0; j != NumRegs; ++j) {
|
|
|
|
ISD::InputArg MyFlags;
|
2012-12-19 19:48:16 +08:00
|
|
|
MyFlags.VT = RegisterVT;
|
2011-05-18 02:29:03 +08:00
|
|
|
MyFlags.Used = !CS.getInstruction()->use_empty();
|
2012-12-19 15:18:57 +08:00
|
|
|
if (CS.paramHasAttr(0, Attribute::SExt))
|
2011-05-18 02:29:03 +08:00
|
|
|
MyFlags.Flags.setSExt();
|
2012-12-19 15:18:57 +08:00
|
|
|
if (CS.paramHasAttr(0, Attribute::ZExt))
|
2011-05-18 02:29:03 +08:00
|
|
|
MyFlags.Flags.setZExt();
|
2012-12-19 15:18:57 +08:00
|
|
|
if (CS.paramHasAttr(0, Attribute::InReg))
|
2011-05-18 02:29:03 +08:00
|
|
|
MyFlags.Flags.setInReg();
|
|
|
|
Ins.push_back(MyFlags);
|
|
|
|
}
|
|
|
|
}
|
2011-05-17 10:36:59 +08:00
|
|
|
|
2011-05-18 02:29:03 +08:00
|
|
|
// Now handle call return values.
|
|
|
|
SmallVector<unsigned, 4> UsedRegs;
|
|
|
|
SmallVector<CCValAssign, 16> RVLocs;
|
2011-06-09 07:55:35 +08:00
|
|
|
CCState CCRetInfo(CC, false, *FuncInfo.MF, TM, RVLocs,
|
2012-07-19 08:11:40 +08:00
|
|
|
I->getParent()->getContext());
|
2011-05-18 02:29:03 +08:00
|
|
|
unsigned ResultReg = FuncInfo.CreateRegs(I->getType());
|
|
|
|
CCRetInfo.AnalyzeCallResult(Ins, RetCC_X86);
|
|
|
|
for (unsigned i = 0; i != RVLocs.size(); ++i) {
|
|
|
|
EVT CopyVT = RVLocs[i].getValVT();
|
|
|
|
unsigned CopyReg = ResultReg + i;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2008-09-07 17:09:33 +08:00
|
|
|
// If this is a call to a function that returns an fp value on the x87 fp
|
|
|
|
// stack, but where we prefer to use the value in xmm registers, copy it
|
|
|
|
// out as F80 and use a truncate to move it from fp stack reg to xmm reg.
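// Concretely (a sketch of the sequence built below): the value is popped off
// the x87 stack into an 80-bit register, spilled to a stack slot with
// ST_Fp80m32/64, and reloaded with MOVSS/MOVSD so it ends up in an XMM
// register of the right width.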
|
2011-05-18 02:29:03 +08:00
|
|
|
if ((RVLocs[i].getLocReg() == X86::ST0 ||
|
2011-06-29 02:32:28 +08:00
|
|
|
RVLocs[i].getLocReg() == X86::ST1)) {
|
2011-07-01 07:42:18 +08:00
|
|
|
if (isScalarFPTypeInSSEReg(RVLocs[i].getValVT())) {
|
2011-06-29 02:32:28 +08:00
|
|
|
CopyVT = MVT::f80;
|
2012-04-20 14:31:50 +08:00
|
|
|
CopyReg = createResultReg(&X86::RFP80RegClass);
|
2011-07-01 07:42:18 +08:00
|
|
|
}
|
2011-06-29 02:32:28 +08:00
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::FpPOP_RETVAL),
|
|
|
|
CopyReg);
|
|
|
|
} else {
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
|
|
|
|
CopyReg).addReg(RVLocs[i].getLocReg());
|
|
|
|
UsedRegs.push_back(RVLocs[i].getLocReg());
|
2008-09-07 17:09:33 +08:00
|
|
|
}
|
|
|
|
|
2011-05-18 02:29:03 +08:00
|
|
|
if (CopyVT != RVLocs[i].getValVT()) {
|
2008-09-07 17:09:33 +08:00
|
|
|
// Round the F80 to the right size, which also moves it to the appropriate xmm
|
|
|
|
// register. This is accomplished by storing the F80 value in memory and
|
|
|
|
// then loading it back. Ewww...
|
2011-05-18 02:29:03 +08:00
|
|
|
EVT ResVT = RVLocs[i].getValVT();
|
2009-08-12 04:47:22 +08:00
|
|
|
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
|
2008-09-07 17:09:33 +08:00
|
|
|
unsigned MemSize = ResVT.getSizeInBits()/8;
|
2009-11-13 04:49:22 +08:00
|
|
|
int FI = MFI.CreateStackObject(MemSize, MemSize, false);
|
2010-07-10 17:00:22 +08:00
|
|
|
addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(Opc)), FI)
|
2011-05-18 02:29:03 +08:00
|
|
|
.addReg(CopyReg);
|
2009-08-12 04:47:22 +08:00
|
|
|
Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
|
2010-07-10 17:00:22 +08:00
|
|
|
addFrameReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
2011-05-18 02:29:03 +08:00
|
|
|
TII.get(Opc), ResultReg + i), FI);
|
2008-09-09 01:15:42 +08:00
|
|
|
}
|
2011-05-17 10:36:59 +08:00
|
|
|
}
|
2011-05-17 08:13:47 +08:00
|
|
|
|
2011-05-18 02:29:03 +08:00
|
|
|
if (RVLocs.size())
|
|
|
|
UpdateValueMap(I, ResultReg, RVLocs.size());
|
|
|
|
|
2010-06-19 07:28:01 +08:00
|
|
|
// Set all unused physreg defs as dead.
|
|
|
|
static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
|
|
|
|
|
2008-09-07 17:09:33 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-08-29 07:21:34 +08:00
|
|
|
bool
|
2010-04-15 09:51:59 +08:00
|
|
|
X86FastISel::TargetSelectInstruction(const Instruction *I) {
|
2008-08-29 07:21:34 +08:00
|
|
|
switch (I->getOpcode()) {
|
|
|
|
default: break;
|
2008-09-03 14:44:39 +08:00
|
|
|
case Instruction::Load:
|
2008-09-04 07:12:08 +08:00
|
|
|
return X86SelectLoad(I);
|
2008-09-05 00:48:33 +08:00
|
|
|
case Instruction::Store:
|
|
|
|
return X86SelectStore(I);
|
2010-07-10 17:00:22 +08:00
|
|
|
case Instruction::Ret:
|
|
|
|
return X86SelectRet(I);
|
2008-09-05 07:26:51 +08:00
|
|
|
case Instruction::ICmp:
|
|
|
|
case Instruction::FCmp:
|
|
|
|
return X86SelectCmp(I);
|
2008-09-05 09:06:14 +08:00
|
|
|
case Instruction::ZExt:
|
|
|
|
return X86SelectZExt(I);
|
|
|
|
case Instruction::Br:
|
|
|
|
return X86SelectBranch(I);
|
2008-09-07 17:09:33 +08:00
|
|
|
case Instruction::Call:
|
|
|
|
return X86SelectCall(I);
|
2008-09-06 02:30:08 +08:00
|
|
|
case Instruction::LShr:
|
|
|
|
case Instruction::AShr:
|
|
|
|
case Instruction::Shl:
|
|
|
|
return X86SelectShift(I);
|
2013-04-18 04:10:13 +08:00
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::SRem:
|
|
|
|
case Instruction::URem:
|
|
|
|
return X86SelectDivRem(I);
|
2008-09-06 02:30:08 +08:00
|
|
|
case Instruction::Select:
|
|
|
|
return X86SelectSelect(I);
|
2008-09-07 16:47:42 +08:00
|
|
|
case Instruction::Trunc:
|
|
|
|
return X86SelectTrunc(I);
|
2008-09-11 05:02:08 +08:00
|
|
|
case Instruction::FPExt:
|
|
|
|
return X86SelectFPExt(I);
|
|
|
|
case Instruction::FPTrunc:
|
|
|
|
return X86SelectFPTrunc(I);
|
2009-03-14 07:53:06 +08:00
|
|
|
case Instruction::IntToPtr: // Deliberate fall-through.
|
|
|
|
case Instruction::PtrToInt: {
|
2009-08-11 06:56:29 +08:00
|
|
|
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
|
|
|
|
EVT DstVT = TLI.getValueType(I->getType());
|
2009-03-14 07:53:06 +08:00
|
|
|
if (DstVT.bitsGT(SrcVT))
|
|
|
|
return X86SelectZExt(I);
|
|
|
|
if (DstVT.bitsLT(SrcVT))
|
|
|
|
return X86SelectTrunc(I);
|
|
|
|
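// Otherwise the cast is between same-sized types and is a no-op;
// just reuse the operand's register.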
unsigned Reg = getRegForValue(I->getOperand(0));
|
|
|
|
if (Reg == 0) return false;
|
|
|
|
UpdateValueMap(I, Reg);
|
|
|
|
return true;
|
|
|
|
}
|
2008-08-29 07:21:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
|
2010-11-03 19:35:31 +08:00
|
|
|
MVT VT;
|
2008-10-15 13:07:36 +08:00
|
|
|
if (!isTypeLegal(C->getType(), VT))
|
2012-08-30 08:30:16 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
// Can't handle alternate code models yet.
|
|
|
|
if (TM.getCodeModel() != CodeModel::Small)
|
|
|
|
return 0;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2008-09-05 08:06:23 +08:00
|
|
|
// Get opcode and regclass of the output for the given load instruction.
|
|
|
|
unsigned Opc = 0;
|
|
|
|
const TargetRegisterClass *RC = NULL;
|
2010-11-03 19:35:31 +08:00
|
|
|
switch (VT.SimpleTy) {
|
2012-08-30 08:30:16 +08:00
|
|
|
default: return 0;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::i8:
|
2008-09-05 08:06:23 +08:00
|
|
|
Opc = X86::MOV8rm;
|
2012-04-20 14:31:50 +08:00
|
|
|
RC = &X86::GR8RegClass;
|
2008-09-05 08:06:23 +08:00
|
|
|
break;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::i16:
|
2008-09-05 08:06:23 +08:00
|
|
|
Opc = X86::MOV16rm;
|
2012-04-20 14:31:50 +08:00
|
|
|
RC = &X86::GR16RegClass;
|
2008-09-05 08:06:23 +08:00
|
|
|
break;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::i32:
|
2008-09-05 08:06:23 +08:00
|
|
|
Opc = X86::MOV32rm;
|
2012-04-20 14:31:50 +08:00
|
|
|
RC = &X86::GR32RegClass;
|
2008-09-05 08:06:23 +08:00
|
|
|
break;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::i64:
|
2008-09-05 08:06:23 +08:00
|
|
|
// Must be in x86-64 mode.
|
|
|
|
Opc = X86::MOV64rm;
|
2012-04-20 14:31:50 +08:00
|
|
|
RC = &X86::GR64RegClass;
|
2008-09-05 08:06:23 +08:00
|
|
|
break;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::f32:
|
2011-09-03 08:46:42 +08:00
|
|
|
if (X86ScalarSSEf32) {
|
|
|
|
Opc = Subtarget->hasAVX() ? X86::VMOVSSrm : X86::MOVSSrm;
|
2012-04-20 14:31:50 +08:00
|
|
|
RC = &X86::FR32RegClass;
|
2008-09-05 08:06:23 +08:00
|
|
|
} else {
|
|
|
|
Opc = X86::LD_Fp32m;
|
2012-04-20 14:31:50 +08:00
|
|
|
RC = &X86::RFP32RegClass;
|
2008-09-05 08:06:23 +08:00
|
|
|
}
|
|
|
|
break;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::f64:
|
2011-09-03 08:46:42 +08:00
|
|
|
if (X86ScalarSSEf64) {
|
|
|
|
Opc = Subtarget->hasAVX() ? X86::VMOVSDrm : X86::MOVSDrm;
|
2012-04-20 14:31:50 +08:00
|
|
|
RC = &X86::FR64RegClass;
|
2008-09-05 08:06:23 +08:00
|
|
|
} else {
|
|
|
|
Opc = X86::LD_Fp64m;
|
2012-04-20 14:31:50 +08:00
|
|
|
RC = &X86::RFP64RegClass;
|
2008-09-05 08:06:23 +08:00
|
|
|
}
|
|
|
|
break;
|
2009-08-12 04:47:22 +08:00
|
|
|
case MVT::f80:
|
2008-09-26 09:39:32 +08:00
|
|
|
// No f80 support yet.
|
2012-08-30 08:30:16 +08:00
|
|
|
return 0;
|
2008-09-05 08:06:23 +08:00
|
|
|
}
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2008-09-20 06:16:54 +08:00
|
|
|
// Materialize addresses with LEA instructions.
|
2008-09-05 08:06:23 +08:00
|
|
|
if (isa<GlobalValue>(C)) {
|
2008-09-20 06:16:54 +08:00
|
|
|
X86AddressMode AM;
|
2009-07-10 13:33:42 +08:00
|
|
|
if (X86SelectAddress(C, AM)) {
|
2011-04-18 01:12:08 +08:00
|
|
|
// If the expression is just a base register, then we're done; otherwise we need
|
|
|
|
// to emit an LEA.
|
|
|
|
if (AM.BaseType == X86AddressMode::RegBase &&
|
|
|
|
AM.IndexReg == 0 && AM.Disp == 0 && AM.GV == 0)
|
|
|
|
return AM.Base.Reg;
|
2011-06-09 07:55:35 +08:00
|
|
|
|
2011-04-18 01:12:08 +08:00
|
|
|
Opc = TLI.getPointerTy() == MVT::i32 ? X86::LEA32r : X86::LEA64r;
|
2008-09-20 06:16:54 +08:00
|
|
|
unsigned ResultReg = createResultReg(RC);
|
2010-07-10 17:00:22 +08:00
|
|
|
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(Opc), ResultReg), AM);
|
2008-09-05 08:06:23 +08:00
|
|
|
return ResultReg;
|
2008-09-20 06:16:54 +08:00
|
|
|
}
|
2008-09-06 05:00:03 +08:00
|
|
|
return 0;
|
2008-09-05 08:06:23 +08:00
|
|
|
}
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2008-09-06 09:11:01 +08:00
|
|
|
// MachineConstantPool wants an explicit alignment.
|
2009-03-13 15:51:59 +08:00
|
|
|
unsigned Align = TD.getPrefTypeAlignment(C->getType());
|
2008-09-06 09:11:01 +08:00
|
|
|
if (Align == 0) {
|
|
|
|
// Alignment of vector types. FIXME!
|
2009-05-09 15:06:46 +08:00
|
|
|
Align = TD.getTypeAllocSize(C->getType());
|
2008-09-06 09:11:01 +08:00
|
|
|
}
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2008-09-30 09:21:32 +08:00
|
|
|
// x86-32 PIC requires a PIC base register for constant pools.
|
|
|
|
unsigned PICBase = 0;
|
2009-06-27 09:31:51 +08:00
|
|
|
unsigned char OpFlag = 0;
|
2009-07-11 05:00:45 +08:00
|
|
|
if (Subtarget->isPICStyleStubPIC()) { // Not dynamic-no-pic
|
2009-07-09 12:39:06 +08:00
|
|
|
OpFlag = X86II::MO_PIC_BASE_OFFSET;
|
2010-07-08 00:29:44 +08:00
|
|
|
PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
|
2009-07-09 12:39:06 +08:00
|
|
|
} else if (Subtarget->isPICStyleGOT()) {
|
|
|
|
OpFlag = X86II::MO_GOTOFF;
|
2010-07-08 00:29:44 +08:00
|
|
|
PICBase = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
|
2009-07-09 12:39:06 +08:00
|
|
|
} else if (Subtarget->isPICStyleRIPRel() &&
|
|
|
|
TM.getCodeModel() == CodeModel::Small) {
|
|
|
|
PICBase = X86::RIP;
|
2009-06-27 09:31:51 +08:00
|
|
|
}
|
2008-09-30 09:21:32 +08:00
|
|
|
|
|
|
|
// Create the load from the constant pool.
|
2008-09-11 04:11:02 +08:00
|
|
|
unsigned MCPOffset = MCP.getConstantPoolIndex(C, Align);
|
2008-09-20 06:16:54 +08:00
|
|
|
unsigned ResultReg = createResultReg(RC);
|
2010-07-10 17:00:22 +08:00
|
|
|
addConstantPoolReference(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(Opc), ResultReg),
|
2009-06-27 09:31:51 +08:00
|
|
|
MCPOffset, PICBase, OpFlag);
|
2008-09-30 09:21:32 +08:00
|
|
|
|
2008-09-05 08:06:23 +08:00
|
|
|
return ResultReg;
|
|
|
|
}
|
|
|
|
|
2010-04-15 09:51:59 +08:00
|
|
|
unsigned X86FastISel::TargetMaterializeAlloca(const AllocaInst *C) {
|
2008-10-03 09:27:49 +08:00
|
|
|
// Fail on dynamic allocas. At this point, getRegForValue has already
|
|
|
|
// checked its CSE maps, so if we're here trying to handle a dynamic
|
|
|
|
// alloca, we're not going to succeed. X86SelectAddress has a
|
|
|
|
// check for dynamic allocas, because it's called directly from
|
|
|
|
// various places, but TargetMaterializeAlloca also needs a check
|
|
|
|
// in order to avoid recursion between getRegForValue,
|
|
|
|
// X86SelectAddress, and TargetMaterializeAlloca.
|
2010-07-08 00:29:44 +08:00
|
|
|
if (!FuncInfo.StaticAllocaMap.count(C))
|
2008-10-03 09:27:49 +08:00
|
|
|
return 0;
|
|
|
|
|
2008-09-11 04:11:02 +08:00
|
|
|
X86AddressMode AM;
|
2009-07-10 13:33:42 +08:00
|
|
|
if (!X86SelectAddress(C, AM))
|
2008-09-11 04:11:02 +08:00
|
|
|
return 0;
|
|
|
|
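// Materialize the alloca's frame address into a pointer-sized register
// with an LEA.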
unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
|
2012-02-22 13:59:10 +08:00
|
|
|
const TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
|
2008-09-11 04:11:02 +08:00
|
|
|
unsigned ResultReg = createResultReg(RC);
|
2010-07-10 17:00:22 +08:00
|
|
|
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
|
|
|
TII.get(Opc), ResultReg), AM);
|
2008-09-11 04:11:02 +08:00
|
|
|
return ResultReg;
|
|
|
|
}
|
|
|
|
|
2011-04-28 06:41:55 +08:00
|
|
|
unsigned X86FastISel::TargetMaterializeFloatZero(const ConstantFP *CF) {
|
|
|
|
MVT VT;
|
|
|
|
if (!isTypeLegal(CF->getType(), VT))
|
2012-11-16 03:40:29 +08:00
|
|
|
return 0;
|
2011-04-28 06:41:55 +08:00
|
|
|
|
|
|
|
// Get opcode and regclass for the given zero.
|
|
|
|
unsigned Opc = 0;
|
|
|
|
const TargetRegisterClass *RC = NULL;
|
|
|
|
switch (VT.SimpleTy) {
|
2012-11-16 03:40:29 +08:00
|
|
|
default: return 0;
|
2012-08-12 01:53:00 +08:00
|
|
|
case MVT::f32:
|
|
|
|
if (X86ScalarSSEf32) {
|
|
|
|
Opc = X86::FsFLD0SS;
|
|
|
|
RC = &X86::FR32RegClass;
|
|
|
|
} else {
|
|
|
|
Opc = X86::LD_Fp032;
|
|
|
|
RC = &X86::RFP32RegClass;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case MVT::f64:
|
|
|
|
if (X86ScalarSSEf64) {
|
|
|
|
Opc = X86::FsFLD0SD;
|
|
|
|
RC = &X86::FR64RegClass;
|
|
|
|
} else {
|
|
|
|
Opc = X86::LD_Fp064;
|
|
|
|
RC = &X86::RFP64RegClass;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case MVT::f80:
|
|
|
|
// No f80 support yet.
|
2012-11-16 03:40:29 +08:00
|
|
|
return 0;
|
2011-04-28 06:41:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
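// The SSE FsFLD0SS/FsFLD0SD opcodes are pseudos that are later expanded
// to a register xor, while the x87 LD_Fp0* forms push +0.0 onto the fp
// stack, so no constant-pool load is needed for a floating-point zero.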
unsigned ResultReg = createResultReg(RC);
|
|
|
|
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), ResultReg);
|
|
|
|
return ResultReg;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
implement rdar://6653118 - fastisel should fold loads where possible.
Since mem2reg isn't run at -O0, we get a ton of reloads from the stack.
For example, this code:
int foo(int x, int y, int z) {
return x+y+z;
}
used to compile into:
_foo: ## @foo
subq $12, %rsp
movl %edi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movl 8(%rsp), %edx
movl 4(%rsp), %esi
addl %edx, %esi
movl (%rsp), %edx
addl %esi, %edx
movl %edx, %eax
addq $12, %rsp
ret
Now we produce:
_foo: ## @foo
subq $12, %rsp
movl %edi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movl 8(%rsp), %edx
addl 4(%rsp), %edx ## Folded load
addl (%rsp), %edx ## Folded load
movl %edx, %eax
addq $12, %rsp
ret
Fewer instructions and less register use = faster compiles.
llvm-svn: 113102
2010-09-05 10:18:34 +08:00
|
|
|
/// TryToFoldLoad - The specified machine instr operand is a vreg, and that
|
|
|
|
/// vreg is being provided by the specified load instruction. If possible,
|
|
|
|
/// try to fold the load as an operand to the instruction, returning true on
|
|
|
|
/// success.
|
|
|
|
bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
|
|
|
|
const LoadInst *LI) {
|
|
|
|
X86AddressMode AM;
|
|
|
|
if (!X86SelectAddress(LI->getOperand(0), AM))
|
|
|
|
return false;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2012-08-12 01:46:16 +08:00
|
|
|
const X86InstrInfo &XII = (const X86InstrInfo&)TII;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2010-09-05 10:18:34 +08:00
|
|
|
unsigned Size = TD.getTypeAllocSize(LI->getType());
|
|
|
|
unsigned Alignment = LI->getAlignment();
|
|
|
|
|
|
|
|
SmallVector<MachineOperand, 8> AddrOps;
|
|
|
|
AM.getFullAddress(AddrOps);
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2010-09-05 10:18:34 +08:00
|
|
|
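// Ask the target folder to build a new instruction in which operand OpNo
// reads directly from the load's address; a null result means the fold
// is not legal for this instruction.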
MachineInstr *Result =
|
|
|
|
XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment);
|
|
|
|
if (Result == 0) return false;
|
2010-11-23 11:31:01 +08:00
|
|
|
|
2011-01-16 10:27:38 +08:00
|
|
|
FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
|
2010-09-05 10:18:34 +08:00
|
|
|
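// The folded instruction supersedes the original; erase the now-dead MI.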
MI->eraseFromParent();
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-09-03 08:03:49 +08:00
|
|
|
namespace llvm {
|
2012-08-03 12:06:28 +08:00
|
|
|
FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
|
|
|
|
const TargetLibraryInfo *libInfo) {
|
|
|
|
return new X86FastISel(funcInfo, libInfo);
|
2008-09-03 08:03:49 +08:00
|
|
|
}
|
2008-08-29 07:21:34 +08:00
|
|
|
}
|