//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Streams.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValue's instead of register numbers for the leaves of the matched
  /// tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    struct {            // This is really a union, discriminated by BaseType!
      SDValue Reg;
      int FrameIndex;
    } Base;

    bool isRIPRel;     // RIP as base?
    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    GlobalValue *GV;
    Constant *CP;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.

    X86ISelAddressMode()
      : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
        GV(0), CP(0), ES(0), JT(-1), Align(0) {
    }
    void dump() {
      cerr << "X86ISelAddressMode " << this << "\n";
      cerr << "Base.Reg ";
      if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
      else cerr << "nul";
      cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
      cerr << "isRIPRel " << isRIPRel << " Scale" << Scale << "\n";
      cerr << "IndexReg ";
      if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
      else cerr << "nul";
      cerr << " Disp " << Disp << "\n";
      cerr << "GV "; if (GV) GV->dump();
      else cerr << "nul";
      cerr << " CP "; if (CP) CP->dump();
      else cerr << "nul";
      cerr << "\n";
      cerr << "ES "; if (ES) cerr << ES; else cerr << "nul";
      cerr << " JT" << JT << " Align" << Align << "\n";
    }
  };
}
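
// Note (summary, not part of the original comments): an X86ISelAddressMode
// encodes the general x86 memory operand form
//   Base + IndexReg * Scale + Disp   (plus an optional symbolic GV/CP/ES/JT part);
// the selector below flattens a matched mode back into the Base, Scale, Index
// and Disp operands that memory-referencing instructions expect.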

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
    /// TM - Keep a reference to X86TargetMachine.
    ///
    X86TargetMachine &TM;

    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// CurBB - Current BB being isel'd.
    ///
    MachineBasicBlock *CurBB;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
      : SelectionDAGISel(tm, fast),
        TM(tm), X86Lowering(*TM.getTargetLowering()),
        Subtarget(&TM.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    /// InstructionSelect - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelect();

    virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

    virtual
      bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const;

    // Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDValue N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

    bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
                      bool isRoot = true, unsigned Depth = 0);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
                          bool isRoot, unsigned Depth);
    bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp);
    bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp);
    bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
                             SDValue N, SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &InChain, SDValue &OutChain);
    bool TryFoldLoad(SDValue P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp);
    void PreprocessForRMW();
    void PreprocessForFPConvert();

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
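
    // Example (illustrative only, not from the original source): a matched
    // address such as 8(%eax,%ecx,4) comes out of getAddressOperands() below
    // as Base = the EAX value, Scale = getI8Imm(4), Index = the ECX value,
    // and Disp = the 32-bit target constant 8.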

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp) {
      Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
        AM.Base.Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI16Imm - Return a target constant with the specified value, of type
    /// i16.
    inline SDValue getI16Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i16);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTruncateTo8Bit - return an SDNode that implements a subreg based
    /// truncate of the specified operand to i8. This can be done with tablegen,
    /// except that this code uses MVT::Flag in a tricky way that happens to
    /// improve scheduling in some cases.
    SDNode *getTruncateTo8Bit(SDValue N0);

#ifndef NDEBUG
    unsigned Indent;
#endif
  };
}

/// findFlagUse - Return use of MVT::Flag value produced by the specified
/// SDNode.
///
static SDNode *findFlagUse(SDNode *N) {
  unsigned FlagResNo = N->getNumValues()-1;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
      SDValue Op = User->getOperand(i);
      if (Op.getNode() == N && Op.getResNo() == FlagResNo)
        return User;
    }
  }
  return NULL;
}

/// findNonImmUse - Return true by reference in "found" if "Use" is a
/// non-immediate use of "Def". This function recursively traverses up the
/// operand chain, ignoring certain nodes.
static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
                          SDNode *Root, bool &found,
                          SmallPtrSet<SDNode*, 16> &Visited) {
  if (found ||
      Use->getNodeId() < Def->getNodeId() ||
      !Visited.insert(Use))
    return;

  for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
    SDNode *N = Use->getOperand(i).getNode();
    if (N == Def) {
      if (Use == ImmedUse || Use == Root)
        continue;  // We are not looking for immediate use.
      assert(N != Root);
      found = true;
      break;
    }

    // Traverse up the operand chain.
    findNonImmUse(N, Def, ImmedUse, Root, found, Visited);
  }
}

/// isNonImmUse - Start searching from Root up the DAG to check if Def can
/// be reached. Return true if that's the case. However, ignore direct uses
/// by ImmedUse (which would be U in the example illustrated in
/// IsLegalAndProfitableToFold) and by Root (which can happen in the store
/// case).
/// FIXME: to be really generic, we should allow direct use by any node
/// that is being folded. But realistically, since we only fold loads which
/// have one non-chain use, we only need to watch out for the load/op/store
/// and load/op/cmp cases where the root (store / cmp) may reach the load via
/// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse) {
  SmallPtrSet<SDNode*, 16> Visited;
  bool found = false;
  findNonImmUse(Root, Def, ImmedUse, Root, found, Visited);
  return found;
}
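
// Editorial note: "folding" N into U below means selecting N and U together as
// a single machine instruction (most commonly folding a load into the memory
// operand of an arithmetic instruction), so the legality check is largely a
// check that merging the two nodes cannot create a cycle in the DAG.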

bool X86DAGToDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
                                                 SDNode *Root) const {
  if (Fast) return false;

  if (U == Root)
    switch (U->getOpcode()) {
    default: break;
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      //   movl 4(%esp), %eax
      //   addl $4, %eax
      // vs.
      //   movl $4, %eax
      //   addl 4(%esp), %eax
      // The former is 2 bytes shorter. In case the increment is 1, the
      // saving can be 4 bytes (by using incl %eax).
      ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(U->getOperand(1));
      if (Imm) {
        if (U->getValueType(0) == MVT::i64) {
          if ((int32_t)Imm->getZExtValue() == (int64_t)Imm->getZExtValue())
            return false;
        } else {
          if ((int8_t)Imm->getZExtValue() == (int64_t)Imm->getZExtValue())
            return false;
        }
      }
    }
    }

  // If Root can somehow reach N through a path that doesn't contain U, then
  // folding N would create a cycle. e.g. In the following diagram, Root can
  // reach N through X. If N is folded into Root, then X is both a
  // predecessor and a successor of U.
  //
  //          [N*]           //
  //         ^   ^           //
  //        /     \          //
  //      [U*]    [X]?       //
  //        ^     ^          //
  //         \   /           //
  //          \ /            //
  //         [Root*]         //
  //
  //  * indicates nodes to be folded together.
  //
  // If Root produces a flag, then it gets (even more) interesting. Since it
  // will be "glued" together with its flag use in the scheduler, we need to
  // check if it might reach N.
  //
  //          [N*]           //
  //         ^   ^           //
  //        /     \          //
  //      [U*]    [X]?       //
  //        ^       ^        //
  //         \       \       //
  //          \       |      //
  //         [Root*]  |      //
  //          ^       |      //
  //          f       |      //
  //          |       /      //
  //         [Y]     /       //
  //           ^    /        //
  //           f   /         //
  //           |  /          //
  //          [FU]           //
  //
  // If FU (flag use) indirectly reaches N (the load), and Root folds N
  // (call it Fold), then X is a predecessor of FU and a successor of
  // Fold. But since Fold and FU are flagged together, this will create
  // a cycle in the scheduling graph.

  MVT VT = Root->getValueType(Root->getNumValues()-1);
  while (VT == MVT::Flag) {
    SDNode *FU = findFlagUse(Root);
    if (FU == NULL)
      break;
    Root = FU;
    VT = Root->getValueType(Root->getNumValues()-1);
  }

  return !isNonImmUse(Root, N, U);
}

/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
/// and move load below the TokenFactor. Replace store's chain operand with
/// load's chain result.
static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
                                 SDValue Store, SDValue TF) {
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
    if (Load.getNode() == TF.getOperand(i).getNode())
      Ops.push_back(Load.getOperand(0));
    else
      Ops.push_back(TF.getOperand(i));
  CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
  CurDAG->UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
                             Store.getOperand(2), Store.getOperand(3));
}

/// isRMWLoad - Return true if N is a load that's part of an RMW sub-DAG.
///
static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address,
                      SDValue &Load) {
  if (N.getOpcode() == ISD::BIT_CONVERT)
    N = N.getOperand(0);

  LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
  if (!LD || LD->isVolatile())
    return false;
  if (LD->getAddressingMode() != ISD::UNINDEXED)
    return false;

  ISD::LoadExtType ExtType = LD->getExtensionType();
  if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
    return false;

  if (N.hasOneUse() &&
      N.getOperand(1) == Address &&
      N.getNode()->isOperandOf(Chain.getNode())) {
    Load = N;
    return true;
  }
  return false;
}

/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
/// operand and move load below the call's chain operand.
static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
                                  SDValue Call, SDValue Chain) {
  SmallVector<SDValue, 8> Ops;
  for (unsigned i = 0, e = Chain.getNode()->getNumOperands(); i != e; ++i)
    if (Load.getNode() == Chain.getOperand(i).getNode())
      Ops.push_back(Load.getOperand(0));
    else
      Ops.push_back(Chain.getOperand(i));
  CurDAG->UpdateNodeOperands(Chain, &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }
  return Chain.getOperand(0).getNode() == Callee.getNode();
}
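
// A typical pattern that the RMW preprocessing below enables (illustrative
// sketch):
//   t1 = load [addr]
//   t2 = add t1, C
//   store t2, [addr]
// can then be matched as a single memory-operand instruction such as
// "addl $C, addr" instead of a separate load, add and store.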

/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
///     [Load chain]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /         |
/// [TokenFactor] [Op]
///     ^          ^
///     |          |
///      \        /
///       \      /
///       [Store]
///
/// The fact that the store's chain operand != load's chain will prevent the
/// (store (op (load))) instruction from being selected. We can transform it to:
///
///     [Load chain]
///         ^
///         |
///    [TokenFactor]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///       |     \-
///       |       |
///       |     [Op]
///       |       ^
///       |       |
///       \      /
///        \    /
///       [Store]
void X86DAGToDAGISel::PreprocessForRMW() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {
    if (I->getOpcode() == X86ISD::CALL) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      SDValue Chain = I->getOperand(0);
      SDValue Load  = I->getOperand(1);
      if (!isCalleeLoad(Load, Chain))
        continue;
      MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDValue Chain = I->getOperand(0);

    if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
      continue;

    SDValue N1 = I->getOperand(1);
    SDValue N2 = I->getOperand(2);
    if ((N1.getValueType().isFloatingPoint() &&
         !N1.getValueType().isVector()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDValue Load;
    unsigned Opcode = N1.getNode()->getOpcode();
    switch (Opcode) {
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::VECTOR_SHUFFLE: {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      if (!RModW)
        RModW = isRMWLoad(N11, Chain, N2, Load);
      break;
    }
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SUBC:
    case ISD::SUBE:
    case X86ISD::SHLD:
    case X86ISD::SHRD: {
      SDValue N10 = N1.getOperand(0);
      RModW = isRMWLoad(N10, Chain, N2, Load);
      break;
    }
    }

    if (RModW) {
      MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
      ++NumLoadMoved;
    }
  }
}

/// PreprocessForFPConvert - Walk over the dag, lowering any fpround and
/// fpextend nodes that target the FP stack into a store and load through a
/// stack slot.  This is a gross hack.  We would like to simply mark these as
/// being illegal, but when we do that, legalize produces these when it expands
/// calls, then expands these in the same legalize pass.  We would like dag
/// combine to be able to hack on these between the call expansion and the node
/// legalization.  As such, this pass basically does "really late" legalization
/// of these inline with the X86 isel pass.
void X86DAGToDAGISel::PreprocessForFPConvert() {
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    MVT SrcVT = N->getOperand(0).getValueType();
    MVT DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into other
    // operations.  Based on this, decide what we want to do.
    MVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(),
                                          N->getOperand(0),
                                          MemTmp, NULL, 0, MemVT);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp,
                                        NULL, 0, MemVT);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created.  This will cause general havoc on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}
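
// In effect, PreprocessForFPConvert rewrites (sketch)
//   (fp_round X)  or  (fp_extend X)
// that must pass through the x87 stack into
//   (extload (truncstore X, tmp), tmp)
// where tmp is a fresh stack temporary, so the conversion happens through a
// store/load pair that both the FP stack and SSE sides can handle.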

/// InstructionSelect - This callback is invoked by SelectionDAGISel when it
/// has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelect() {
  CurBB = BB;  // BB can change as a result of isel.
  const Function *F = CurDAG->getMachineFunction().getFunction();
  OptForSize = F->hasFnAttr(Attribute::OptimizeForSize);

  DEBUG(BB->dump());
  if (!Fast)
    PreprocessForRMW();

  // FIXME: This should only happen when not -fast.
  PreprocessForFPConvert();

  // Codegen the basic block.
#ifndef NDEBUG
  DOUT << "===== Instruction selection begins:\n";
  Indent = 0;
#endif
  SelectRoot(*CurDAG);
#ifndef NDEBUG
  DOUT << "===== Instruction selection ends:\n";
#endif

  CurDAG->RemoveDeadNodes();
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}
|
|
|
|
|

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
                                   bool isRoot, unsigned Depth) {
  bool is64Bit = Subtarget->is64Bit();
  DOUT << "MatchAddress: "; DEBUG(AM.dump());
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM, isRoot, Depth);

  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRel) {
    if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
      uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
      if (!is64Bit || isInt32(AM.Disp + Val)) {
        AM.Disp += Val;
        return false;
      }
    }
    return true;
  }
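
  // Illustrative note (not from the original source): a RIP-relative address
  // only carries a signed 32-bit displacement, which is why the isInt32 checks
  // here and below guard every displacement fold on x86-64.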

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!is64Bit || isInt32(AM.Disp + Val)) {
      AM.Disp += Val;
      return false;
    }
    break;
  }

  case X86ISD::Wrapper: {
    DOUT << "Wrapper: 64bit " << is64Bit;
    DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
    // Under X86-64 non-small code model, GV (and friends) are 64-bits.
    // Also, base and index reg must be 0 in order to use rip as base.
    if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
                    AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
      break;
    if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
      break;
    // If the value is available in a register and both base and index
    // components have already been picked, we can't fit it in the addressing
    // mode. Duplicate the GlobalAddress or ConstantPool as a displacement.
    {
      SDValue N0 = N.getOperand(0);
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
        uint64_t Offset = G->getOffset();
        if (!is64Bit || isInt32(AM.Disp + Offset)) {
          GlobalValue *GV = G->getGlobal();
          AM.GV = GV;
          AM.Disp += Offset;
          AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
          return false;
        }
      } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
        uint64_t Offset = CP->getOffset();
        if (!is64Bit || isInt32(AM.Disp + Offset)) {
          AM.CP = CP->getConstVal();
          AM.Align = CP->getAlignment();
          AM.Disp += Offset;
          AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
          return false;
        }
      } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
        AM.ES = S->getSymbol();
        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
        return false;
      } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
        AM.JT = J->getIndex();
        AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
        return false;
      }
    }
    break;
  }

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase
        && AM.Base.Reg.getNode() == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;
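
    // Illustrative note (not from the original source): the FrameIndex is kept
    // symbolic in AM.Base.FrameIndex here; it only becomes a concrete
    // "offset(%ebp)" or "offset(%esp)" operand after frame index elimination.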

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1 || AM.isRIPRel)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDValue ShVal = N.getNode()->getOperand(0);

        // Okay, we know that we have a scale by now.  However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (ShVal.getNode()->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
            isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
          AM.IndexReg = ShVal.getNode()->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getZExtValue() << Val);
          if (!is64Bit || isInt32(Disp))
            AM.Disp = Disp;
          else
            AM.IndexReg = ShVal;
        } else {
          AM.IndexReg = ShVal;
        }
        return false;
      }
      break;
    }
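
    // Illustrative example (not from the original source): for
    // (shl (add %x, 20), 2) the code above picks %x as the index with
    // AM.Scale = 4 and folds the constant, adding 20 << 2 = 80 to AM.Disp,
    // so the address can be emitted as "80(%base,%x,4)".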

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.getResNo() != 0) break;
    // FALL THROUGH
  case ISD::MUL:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.getNode() == 0 &&
        AM.IndexReg.getNode() == 0 &&
        !AM.isRIPRel) {
      if (ConstantSDNode
            *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
            CN->getZExtValue() == 9) {
          AM.Scale = unsigned(CN->getZExtValue())-1;

          SDValue MulVal = N.getNode()->getOperand(0);
          SDValue Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
            Reg = MulVal.getNode()->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getZExtValue() *
                                      CN->getZExtValue();
            if (!is64Bit || isInt32(Disp))
              AM.Disp = Disp;
            else
              Reg = N.getNode()->getOperand(0);
          } else {
            Reg = N.getNode()->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;
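
    // Illustrative example (not from the original source): "x*9" is matched
    // with AM.Base.Reg = AM.IndexReg = x and AM.Scale = 8, so it can later be
    // emitted as a single "lea (%x,%x,8), %dst" instead of an imul.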

  case ISD::ADD:
    {
      X86ISelAddressMode Backup = AM;
      if (!MatchAddress(N.getNode()->getOperand(0), AM, false, Depth+1) &&
          !MatchAddress(N.getNode()->getOperand(1), AM, false, Depth+1))
        return false;
      AM = Backup;
      if (!MatchAddress(N.getNode()->getOperand(1), AM, false, Depth+1) &&
          !MatchAddress(N.getNode()->getOperand(0), AM, false, Depth+1))
        return false;
      AM = Backup;
    }
    break;

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      uint64_t Offset = CN->getSExtValue();
      // Start with the LHS as an addr mode.
      if (!MatchAddress(N.getOperand(0), AM, false) &&
          // Address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          (!is64Bit || isInt32(AM.Disp + Offset)) &&
          // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
        AM.Disp += Offset;
        return false;
      }
      AM = Backup;
    }
    break;
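
    // Illustrative example (not from the original source): if %p is known to
    // be 8-byte aligned, its low three bits are zero, so "%p | 4" behaves like
    // "%p + 4" and the constant can be folded straight into AM.Disp.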

  case ISD::AND: {
    // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
    SDValue Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SHL) break;

    // Scale must not be used already.
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;

    // Not when RIP is used as the base.
    if (AM.isRIPRel) break;

    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getZExtValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
    SDValue X = Shift.getOperand(0);
    SDValue NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
                                         SDValue(C2, 0), SDValue(C1, 0));
    SDValue NewAND = CurDAG->getNode(ISD::AND, N.getValueType(), X, NewANDMask);
    SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, N.getValueType(),
                                       NewAND, SDValue(C1, 0));

    // Insert the new nodes into the topological ordering.
    if (C1->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), C1);
      C1->setNodeId(X.getNode()->getNodeId());
    }
    if (NewANDMask.getNode()->getNodeId() == -1 ||
        NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
      CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
      NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
    }
    if (NewAND.getNode()->getNodeId() == -1 ||
        NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
      CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
      NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
    }
    if (NewSHIFT.getNode()->getNodeId() == -1 ||
        NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
      CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
      NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
    }

    CurDAG->ReplaceAllUsesWith(N, NewSHIFT);

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
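
    // Illustrative example (not from the original source): "(x << 2) & 0x3FC"
    // is rewritten above as "(x & 0xFF) << 2", so the shift becomes the
    // addressing-mode scale (AM.Scale = 4) and the new AND is used as the
    // index register.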
  }

  return MatchAddressBase(N, AM, isRoot, Depth);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
                                       bool isRoot, unsigned Depth) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.getNode() == 0 && !AM.isRIPRel) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}

/// SelectAddr - returns true if it is able to pattern match an addressing
/// mode.  It returns the operands which make up the maximal addressing mode
/// it can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.getNode())
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp);
  return true;
}

/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
/// match a load whose top elements are either undef or zeros.  The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &InChain,
                                          SDValue &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.getNode()) &&
        InChain.getValue(0).hasOneUse() &&
        N.hasOneUse() &&
        IsLegalAndProfitableToFold(N.getNode(), Pred.getNode(), Op.getNode())) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements.  This is a vector shuffle from the zero vector.
  if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(0).getNode()->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
      N.getOperand(0).getOperand(0).hasOneUse()) {
    // Okay, this is a zero extending load.  Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
      return false;
    OutChain = LD->getChain();
    InChain = SDValue(LD, 1);
    return true;
  }
  return false;
}
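
// Illustrative note (not from the original source): this is what lets a
// pattern like (scalar_to_vector (load addr)) feed a scalar SSE operation
// directly from memory, e.g. "subss LCPI1_0, %xmm0", instead of going through
// a separate register load.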

/// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.getNode())
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
    Complexity++;

  if (Complexity > 2) {
    getAddressOperands(AM, Base, Scale, Index, Disp);
    return true;
  }
  return false;
}
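
// Illustrative note (not from the original source): under this heuristic a
// plain "base + index" scores Complexity 2 and is left to a normal add, while
// "base + index*scale + disp" scores higher and is emitted as an LEA such as
// "leal 8(%eax,%ecx,4), %edx".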

bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp) {
  if (ISD::isNON_EXTLoad(N.getNode()) &&
      N.hasOneUse() &&
      IsLegalAndProfitableToFold(N.getNode(), P.getNode(), P.getNode()))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
  return false;
}

/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  MachineFunction *MF = CurBB->getParent();
  unsigned GlobalBaseReg = TM.getInstrInfo()->getGlobalBaseReg(MF);
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}

static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).getNode());
}

/// getTruncateTo8Bit - return an SDNode that implements a subreg based
/// truncate of the specified operand to i8. This can be done with tablegen,
/// except that this code uses MVT::Flag in a tricky way that happens to
/// improve scheduling in some cases.
SDNode *X86DAGToDAGISel::getTruncateTo8Bit(SDValue N0) {
  assert(!Subtarget->is64Bit() &&
         "getTruncateTo8Bit is only needed on x86-32!");
  SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1

  // Ensure that the source register has an 8-bit subreg on 32-bit targets
  unsigned Opc;
  MVT N0VT = N0.getValueType();
  switch (N0VT.getSimpleVT()) {
  default: assert(0 && "Unknown truncate!");
  case MVT::i16:
    Opc = X86::MOV16to16_;
    break;
  case MVT::i32:
    Opc = X86::MOV32to32_;
    break;
  }

  // The use of MVT::Flag here is not strictly accurate, but it helps
  // scheduling in some cases.
  N0 = SDValue(CurDAG->getTargetNode(Opc, N0VT, MVT::Flag, N0), 0);
  return CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                               MVT::i8, N0, SRIdx, N0.getValue(1));
}

SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue Chain = Node->getOperand(0);
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3;
  if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3))
    return NULL;
  SDValue LSI = Node->getOperand(4); // MemOperand
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, In2L, In2H, LSI, Chain };
  return CurDAG->getTargetNode(Opc, MVT::i32, MVT::i32, MVT::Other, Ops, 8);
}
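
// Illustrative note (not from the original source): the 64-bit atomic pseudo
// nodes carry (chain, address, value-low, value-high, memoperand); the address
// is run through SelectAddr so the ATOMxxx6432 instruction gets a full x86
// addressing mode for its memory operand.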

SDNode *X86DAGToDAGISel::Select(SDValue N) {
  SDNode *Node = N.getNode();
  MVT NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();

#ifndef NDEBUG
  DOUT << std::string(Indent, ' ') << "Selecting: ";
  DEBUG(Node->dump(CurDAG));
  DOUT << "\n";
  Indent += 2;
#endif

  if (Node->isMachineOpcode()) {
#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "== ";
    DEBUG(Node->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif
    return NULL;   // Already selected.
  }

  switch (Opcode) {
  default: break;
  case X86ISD::GlobalBaseReg:
    return getGlobalBaseReg();

  case X86ISD::ATOMOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMOR6432);
  case X86ISD::ATOMXOR64_DAG:
    return SelectAtomic64(Node, X86::ATOMXOR6432);
  case X86ISD::ATOMADD64_DAG:
    return SelectAtomic64(Node, X86::ATOMADD6432);
  case X86ISD::ATOMSUB64_DAG:
    return SelectAtomic64(Node, X86::ATOMSUB6432);
  case X86ISD::ATOMNAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMNAND6432);
  case X86ISD::ATOMAND64_DAG:
    return SelectAtomic64(Node, X86::ATOMAND6432);
  case X86ISD::ATOMSWAP64_DAG:
    return SelectAtomic64(Node, X86::ATOMSWAP6432);

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    bool isSigned = Opcode == ISD::SMUL_LOHI;
    if (!isSigned)
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
      case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
      case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
      case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
      }
    else
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
      case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
      case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
      case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
      }

    unsigned LoReg, HiReg;
    switch (NVT.getSimpleVT()) {
    default: assert(0 && "Unsupported VT!");
    case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
    case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
    case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
    // Multiply is commutative; if the load didn't fold on the RHS, try the LHS.
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg,
                                          N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
      InFlag = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
    } else {
      InFlag =
        SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
    }

    // Copy the low half of the result, if it is needed.
    if (!N.getValue(0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                              LoReg, NVT, InFlag);
      InFlag = Result.getValue(2);
      ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }
    // Copy the high half of the result, if it is needed.
    if (!N.getValue(1).use_empty()) {
      SDValue Result;
      if (HiReg == X86::AH && Subtarget->is64Bit()) {
        // Prevent use of AH in a REX instruction by referencing AX instead.
        // Shift it down 8 bits.
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        X86::AX, MVT::i16, InFlag);
        InFlag = Result.getValue(2);
        Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
                                     CurDAG->getTargetConstant(8, MVT::i8)), 0);
        // Then truncate it down to i8.
        SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
        Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                                               MVT::i8, Result, SRIdx), 0);
      } else {
        Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                        HiReg, NVT, InFlag);
        InFlag = Result.getValue(2);
      }
      ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(Result.getNode()->dump(CurDAG));
      DOUT << "\n";
#endif
    }

#ifndef NDEBUG
    Indent -= 2;
#endif

    return NULL;
  }
|
2006-02-18 08:15:05 +08:00
|
|
|
|
2007-10-09 02:33:35 +08:00
|
|
|
case ISD::SDIVREM:
|
|
|
|
case ISD::UDIVREM: {
|
2008-07-28 05:46:04 +08:00
|
|
|
SDValue N0 = Node->getOperand(0);
|
|
|
|
SDValue N1 = Node->getOperand(1);
|
2007-10-09 02:33:35 +08:00
|
|
|
|
|
|
|
bool isSigned = Opcode == ISD::SDIVREM;
|
2006-01-07 07:19:29 +08:00
|
|
|
if (!isSigned)
|
2008-06-06 20:08:01 +08:00
|
|
|
switch (NVT.getSimpleVT()) {
|
2006-01-07 07:19:29 +08:00
|
|
|
default: assert(0 && "Unsupported VT!");
|
|
|
|
case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
|
|
|
|
case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
|
|
|
|
case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
|
2006-09-08 14:48:29 +08:00
|
|
|
case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
|
2006-01-07 07:19:29 +08:00
|
|
|
}
|
|
|
|
else
|
2008-06-06 20:08:01 +08:00
|
|
|
switch (NVT.getSimpleVT()) {
|
2006-01-07 07:19:29 +08:00
|
|
|
default: assert(0 && "Unsupported VT!");
|
|
|
|
case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
|
|
|
|
case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
|
|
|
|
case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
|
2006-09-08 14:48:29 +08:00
|
|
|
case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
|
2006-01-07 07:19:29 +08:00
|
|
|
}
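
      // Both DIV and IDIV divide the double-width value held in HiReg:LoReg
      // by the operand, leaving the quotient in LoReg and the remainder in
      // HiReg.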
      unsigned LoReg, HiReg;
      unsigned ClrOpcode, SExtOpcode;
      switch (NVT.getSimpleVT()) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:
        LoReg = X86::AL;  HiReg = X86::AH;
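        // i8 needs no explicit clear of the high half; the unsigned i8 path
        // below zero-extends the dividend into AX instead.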
        ClrOpcode = 0;
        SExtOpcode = X86::CBW;
        break;
      case MVT::i16:
        LoReg = X86::AX;  HiReg = X86::DX;
        ClrOpcode = X86::MOV16r0;
        SExtOpcode = X86::CWD;
        break;
      case MVT::i32:
        LoReg = X86::EAX; HiReg = X86::EDX;
        ClrOpcode = X86::MOV32r0;
        SExtOpcode = X86::CDQ;
        break;
      case MVT::i64:
        LoReg = X86::RAX; HiReg = X86::RDX;
        ClrOpcode = X86::MOV64r0;
        SExtOpcode = X86::CQO;
        break;
      }

      SDValue Tmp0, Tmp1, Tmp2, Tmp3;
      bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);

      SDValue InFlag;
      if (NVT == MVT::i8 && !isSigned) {
        // Special case for div8, just use a move with zero extension to AX to
        // clear the upper 8 bits (AH).
        SDValue Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
        if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
          SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
          Move =
            SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
                                          Ops, 5), 0);
          Chain = Move.getValue(1);
          ReplaceUses(N0.getValue(1), Chain);
        } else {
          Move =
            SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
          Chain = CurDAG->getEntryNode();
        }
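        // The 8-bit divide reads its 16-bit dividend from AX, so place the
        // zero-extended value there before emitting the divide.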
        Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDValue());
        InFlag = Chain.getValue(1);
      } else {
        InFlag =
          CurDAG->getCopyToReg(CurDAG->getEntryNode(),
                               LoReg, N0, SDValue()).getValue(1);
        if (isSigned) {
          // Sign extend the low part into the high part.
          InFlag =
            SDValue(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
        } else {
          // Zero out the high part, effectively zero extending the input.
          SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
          InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg,
                                        ClrNode, InFlag).getValue(1);
        }
      }
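
      // Emit the divide itself, folding the memory operand when the load was
      // foldable; otherwise use the register form.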
      if (foldedLoad) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
        SDNode *CNode =
          CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
        InFlag = SDValue(CNode, 1);
        // Update the chain.
        ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
      } else {
        InFlag =
          SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
      }

      // Copy the division (low) result, if it is needed.
      if (!N.getValue(0).use_empty()) {
        SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                                LoReg, NVT, InFlag);
        InFlag = Result.getValue(2);
        ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
        DOUT << std::string(Indent-2, ' ') << "=> ";
        DEBUG(Result.getNode()->dump(CurDAG));
        DOUT << "\n";
#endif
      }
      // Copy the remainder (high) result, if it is needed.
      if (!N.getValue(1).use_empty()) {
        SDValue Result;
        if (HiReg == X86::AH && Subtarget->is64Bit()) {
          // Prevent use of AH in a REX instruction by referencing AX instead.
          // Shift it down 8 bits.
          Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                          X86::AX, MVT::i16, InFlag);
          InFlag = Result.getValue(2);
          Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
                                     CurDAG->getTargetConstant(8, MVT::i8)), 0);
          // Then truncate it down to i8.
          SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
          Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                                                 MVT::i8, Result, SRIdx), 0);
        } else {
          Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                          HiReg, NVT, InFlag);
          InFlag = Result.getValue(2);
        }
        ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
        DOUT << std::string(Indent-2, ' ') << "=> ";
        DEBUG(Result.getNode()->dump(CurDAG));
        DOUT << "\n";
#endif
      }

#ifndef NDEBUG
      Indent -= 2;
#endif

      return NULL;
    }

    case ISD::SIGN_EXTEND_INREG: {
      MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
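      // In 32-bit mode only EAX, EBX, ECX and EDX have addressable 8-bit
      // subregisters, so an i8 extend first copies the value into one of
      // those registers (via getTruncateTo8Bit) before the 8-bit MOVSX.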
      if (SVT == MVT::i8 && !Subtarget->is64Bit()) {
        SDValue N0 = Node->getOperand(0);

        SDValue TruncOp = SDValue(getTruncateTo8Bit(N0), 0);
        unsigned Opc = 0;
        switch (NVT.getSimpleVT()) {
        default: assert(0 && "Unknown sign_extend_inreg!");
        case MVT::i16:
          Opc = X86::MOVSX16rr8;
          break;
        case MVT::i32:
          Opc = X86::MOVSX32rr8;
          break;
        }

        SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);

#ifndef NDEBUG
        DOUT << std::string(Indent-2, ' ') << "=> ";
        DEBUG(TruncOp.getNode()->dump(CurDAG));
        DOUT << "\n";
        DOUT << std::string(Indent-2, ' ') << "=> ";
        DEBUG(ResNode->dump(CurDAG));
        DOUT << "\n";
        Indent -= 2;
#endif
        return ResNode;
      }
      break;
    }

    case ISD::TRUNCATE: {
      if (NVT == MVT::i8 && !Subtarget->is64Bit()) {
        SDValue Input = Node->getOperand(0);
        SDNode *ResNode = getTruncateTo8Bit(Input);

#ifndef NDEBUG
        DOUT << std::string(Indent-2, ' ') << "=> ";
        DEBUG(ResNode->dump(CurDAG));
        DOUT << "\n";
        Indent -= 2;
#endif
        return ResNode;
      }
      break;
    }

    case ISD::DECLARE: {
      // Handle DECLARE nodes here because the second operand may have been
      // wrapped in X86ISD::Wrapper.
      SDValue Chain = Node->getOperand(0);
      SDValue N1 = Node->getOperand(1);
      SDValue N2 = Node->getOperand(2);
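      // N1 is expected to be a frame index (the variable's stack slot) and N2
      // a global address, possibly wrapped in X86ISD::Wrapper; both are
      // rewritten into target form before the target DECLARE is emitted.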
      FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);
      if (!FINode)
        break;
      if (N2.getOpcode() == ISD::ADD &&
          N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
        N2 = N2.getOperand(1);
      if (N2.getOpcode() != X86ISD::Wrapper)
        break;
      GlobalAddressSDNode *GVNode =
        dyn_cast<GlobalAddressSDNode>(N2.getOperand(0));
      if (!GVNode)
        break;
      SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
                                                 TLI.getPointerTy());
      SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GVNode->getGlobal(),
                                                    TLI.getPointerTy());
      SDValue Ops[] = { Tmp1, Tmp2, Chain };
      return CurDAG->getTargetNode(TargetInstrInfo::DECLARE,
                                   MVT::Other, Ops, 3);
      break;
    }
  }
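
  // Nothing above matched this node specially; fall back to the
  // TableGen-generated matcher (SelectCode) produced from the .td patterns.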
  SDNode *ResNode = SelectCode(N);

#ifndef NDEBUG
  DOUT << std::string(Indent-2, ' ') << "=> ";
  if (ResNode == NULL || ResNode == N.getNode())
    DEBUG(N.getNode()->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DOUT << "\n";
  Indent -= 2;
#endif

  return ResNode;
}

bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3;
  switch (ConstraintCode) {
  case 'o': // offsetable ??
  case 'v': // not offsetable ??
  default: return true;
  case 'm': // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
      return true;
    break;
  }
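
  // Pass back the four components of the matched x86 memory address
  // (base, scale, index and displacement) to the inline-asm lowering.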
  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
  return new X86DAGToDAGISel(TM, Fast);
}