//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Streams.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");
//===----------------------------------------------------------------------===//
// Pattern Matcher Implementation
//===----------------------------------------------------------------------===//
namespace {
/// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
/// SDValues instead of register numbers for the leaves of the matched
/// tree.
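///
/// The matched tree corresponds to the general x86 memory operand form
///   [Base + Scale*Index + Disp]
/// so, for example, a DAG computing "p + 4*i + 8" could be matched with
/// Base.Reg = p, Scale = 4, IndexReg = i and Disp = 8.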
struct X86ISelAddressMode {
enum {
RegBase,
FrameIndexBase
} BaseType;
struct { // This is really a union, discriminated by BaseType!
SDValue Reg;
int FrameIndex;
} Base;
bool isRIPRel; // RIP as base?
unsigned Scale;
SDValue IndexReg;
int32_t Disp;
GlobalValue *GV;
Constant *CP;
const char *ES;
int JT;
unsigned Align; // CP alignment.
X86ISelAddressMode()
: BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
GV(0), CP(0), ES(0), JT(-1), Align(0) {
}
void dump() {
cerr << "X86ISelAddressMode " << this << "\n";
cerr << "Base.Reg ";
if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
else cerr << "nul";
cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
cerr << "isRIPRel " << isRIPRel << " Scale" << Scale << "\n";
cerr << "IndexReg ";
if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
else cerr << "nul";
cerr << " Disp " << Disp << "\n";
cerr << "GV "; if (GV) GV->dump();
else cerr << "nul";
cerr << " CP "; if (CP) CP->dump();
else cerr << "nul";
cerr << "\n";
cerr << "ES "; if (ES) cerr << ES; else cerr << "nul";
cerr << " JT" << JT << " Align" << Align << "\n";
}
};
}
namespace {
//===--------------------------------------------------------------------===//
/// ISel - X86 specific code to select X86 machine instructions for
/// SelectionDAG operations.
///
class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
/// TM - Keep a reference to X86TargetMachine.
///
X86TargetMachine &TM;
/// X86Lowering - This object fully describes how to lower LLVM code to an
/// X86-specific SelectionDAG.
X86TargetLowering &X86Lowering;
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
const X86Subtarget *Subtarget;
/// CurBB - Current BB being isel'd.
///
MachineBasicBlock *CurBB;
/// OptForSize - If true, selector should try to optimize for code size
/// instead of performance.
bool OptForSize;
public:
X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
: SelectionDAGISel(*tm.getTargetLowering(), fast),
TM(tm), X86Lowering(*TM.getTargetLowering()),
Subtarget(&TM.getSubtarget<X86Subtarget>()),
OptForSize(false) {}
virtual const char *getPassName() const {
return "X86 DAG->DAG Instruction Selection";
}
/// InstructionSelect - This callback is invoked by
/// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
virtual void InstructionSelect();
virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);
virtual
bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const;
// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"
private:
SDNode *Select(SDValue N);
SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
bool isRoot = true, unsigned Depth = 0);
bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
bool isRoot, unsigned Depth);
bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp);
bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index, SDValue &Disp);
bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
SDValue N, SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp,
SDValue &InChain, SDValue &OutChain);
bool TryFoldLoad(SDValue P, SDValue N,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp);
void PreprocessForRMW();
void PreprocessForFPConvert();
/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
/// inline asm expressions.
virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
char ConstraintCode,
std::vector<SDValue> &OutOps);
void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);
inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp) {
Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
AM.Base.Reg;
Scale = getI8Imm(AM.Scale);
Index = AM.IndexReg;
// These are 32-bit even in 64-bit mode since RIP relative offset
// is 32-bit.
if (AM.GV)
Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
else if (AM.CP)
Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
AM.Align, AM.Disp);
else if (AM.ES)
Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
else if (AM.JT != -1)
Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
else
Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
}
/// getI8Imm - Return a target constant with the specified value, of type
/// i8.
inline SDValue getI8Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, MVT::i8);
}
/// getI16Imm - Return a target constant with the specified value, of type
/// i16.
inline SDValue getI16Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, MVT::i16);
}
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
inline SDValue getI32Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *getGlobalBaseReg();
/// getTruncateTo8Bit - return an SDNode that implements a subreg based
/// truncate of the specified operand to i8. This can be done with tablegen,
/// except that this code uses MVT::Flag in a tricky way that happens to
/// improve scheduling in some cases.
SDNode *getTruncateTo8Bit(SDValue N0);
#ifndef NDEBUG
unsigned Indent;
#endif
};
}
/// findFlagUse - Return use of MVT::Flag value produced by the specified
/// SDNode.
///
static SDNode *findFlagUse(SDNode *N) {
unsigned FlagResNo = N->getNumValues()-1;
for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
SDNode *User = *I;
for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
SDValue Op = User->getOperand(i);
if (Op.getNode() == N && Op.getResNo() == FlagResNo)
return User;
}
}
return NULL;
}
/// findNonImmUse - Return true by reference in "found" if "Use" is a
/// non-immediate use of "Def". This function recursively traverses
/// up the operand chain, ignoring certain nodes.
static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
SDNode *Root, bool &found,
SmallPtrSet<SDNode*, 16> &Visited) {
if (found ||
Use->getNodeId() < Def->getNodeId() ||
!Visited.insert(Use))
return;
for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
SDNode *N = Use->getOperand(i).getNode();
if (N == Def) {
if (Use == ImmedUse || Use == Root)
continue; // We are not looking for immediate use.
assert(N != Root);
found = true;
break;
}
// Traverse up the operand chain.
findNonImmUse(N, Def, ImmedUse, Root, found, Visited);
}
}
/// isNonImmUse - Start searching from Root up the DAG to check if Def can
/// be reached. Return true if that's the case. However, ignore direct uses
/// by ImmedUse (which would be U in the example illustrated in
/// IsLegalAndProfitableToFold) and by Root (which can happen in the store
/// case).
/// FIXME: to be really generic, we should allow direct use by any node
/// that is being folded. But realistically, since we only fold loads which
/// have one non-chain use, we only need to watch out for load/op/store
/// and load/op/cmp case where the root (store / cmp) may reach the load via
/// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse) {
SmallPtrSet<SDNode*, 16> Visited;
bool found = false;
findNonImmUse(Root, Def, ImmedUse, Root, found, Visited);
return found;
}
bool X86DAGToDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
SDNode *Root) const {
if (Fast) return false;
if (U == Root)
switch (U->getOpcode()) {
default: break;
case ISD::ADD:
case ISD::ADDC:
case ISD::ADDE:
case ISD::AND:
case ISD::OR:
case ISD::XOR: {
// If the other operand is an 8-bit immediate we should fold the immediate
// instead. This reduces code size.
// e.g.
// movl 4(%esp), %eax
// addl $4, %eax
// vs.
// movl $4, %eax
// addl 4(%esp), %eax
// The former is 2 bytes shorter. In the case where the increment is 1,
// the saving can be 4 bytes (by using incl %eax).
ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(U->getOperand(1));
if (Imm) {
if (U->getValueType(0) == MVT::i64) {
if ((int32_t)Imm->getZExtValue() == (int64_t)Imm->getZExtValue())
return false;
} else {
if ((int8_t)Imm->getZExtValue() == (int64_t)Imm->getZExtValue())
return false;
}
}
}
}
// If Root can somehow reach N through a path that doesn't contain
// U, then folding N would create a cycle. e.g. In the following
// diagram, Root can reach N through X. If N is folded into Root, then
// X is both a predecessor and a successor of U.
//
// [N*] //
// ^ ^ //
// / \ //
// [U*] [X]? //
// ^ ^ //
// \ / //
// \ / //
// [Root*] //
//
// * indicates nodes to be folded together.
//
// If Root produces a flag, then it gets (even more) interesting. Since it
// will be "glued" together with its flag use in the scheduler, we need to
// check if it might reach N.
//
// [N*] //
// ^ ^ //
// / \ //
// [U*] [X]? //
// ^ ^ //
// \ \ //
// \ | //
// [Root*] | //
// ^ | //
// f | //
// | / //
// [Y] / //
// ^ / //
// f / //
// | / //
// [FU] //
//
// If FU (flag use) indirectly reaches N (the load), and Root folds N
// (call it Fold), then X is a predecessor of FU and a successor of
// Fold. But since Fold and FU are flagged together, this will create
// a cycle in the scheduling graph.
MVT VT = Root->getValueType(Root->getNumValues()-1);
while (VT == MVT::Flag) {
SDNode *FU = findFlagUse(Root);
if (FU == NULL)
break;
Root = FU;
VT = Root->getValueType(Root->getNumValues()-1);
}
return !isNonImmUse(Root, N, U);
}
/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
/// and move load below the TokenFactor. Replace store's chain operand with
/// load's chain result.
static void MoveBelowTokenFactor(SelectionDAG *CurDAG, SDValue Load,
SDValue Store, SDValue TF) {
SmallVector<SDValue, 4> Ops;
for (unsigned i = 0, e = TF.getNode()->getNumOperands(); i != e; ++i)
if (Load.getNode() == TF.getOperand(i).getNode())
Ops.push_back(Load.getOperand(0));
else
Ops.push_back(TF.getOperand(i));
CurDAG->UpdateNodeOperands(TF, &Ops[0], Ops.size());
CurDAG->UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
CurDAG->UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
Store.getOperand(2), Store.getOperand(3));
}
/// isRMWLoad - Return true if N is a load that's part of RMW sub-DAG.
///
static bool isRMWLoad(SDValue N, SDValue Chain, SDValue Address,
SDValue &Load) {
if (N.getOpcode() == ISD::BIT_CONVERT)
N = N.getOperand(0);
LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
if (!LD || LD->isVolatile())
return false;
if (LD->getAddressingMode() != ISD::UNINDEXED)
return false;
ISD::LoadExtType ExtType = LD->getExtensionType();
if (ExtType != ISD::NON_EXTLOAD && ExtType != ISD::EXTLOAD)
return false;
if (N.hasOneUse() &&
N.getOperand(1) == Address &&
N.getNode()->isOperandOf(Chain.getNode())) {
Load = N;
return true;
}
return false;
}
/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
/// operand and move load below the call's chain operand.
static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
SDValue Call, SDValue Chain) {
SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = Chain.getNode()->getNumOperands(); i != e; ++i)
if (Load.getNode() == Chain.getOperand(i).getNode())
Ops.push_back(Load.getOperand(0));
else
Ops.push_back(Chain.getOperand(i));
CurDAG->UpdateNodeOperands(Chain, &Ops[0], Ops.size());
CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
Load.getOperand(1), Load.getOperand(2));
Ops.clear();
Ops.push_back(SDValue(Load.getNode(), 1));
for (unsigned i = 1, e = Call.getNode()->getNumOperands(); i != e; ++i)
Ops.push_back(Call.getOperand(i));
CurDAG->UpdateNodeOperands(Call, &Ops[0], Ops.size());
}
/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
return false;
LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
if (!LD ||
LD->isVolatile() ||
LD->getAddressingMode() != ISD::UNINDEXED ||
LD->getExtensionType() != ISD::NON_EXTLOAD)
return false;
// Now let's find the callseq_start.
while (Chain.getOpcode() != ISD::CALLSEQ_START) {
if (!Chain.hasOneUse())
return false;
Chain = Chain.getOperand(0);
}
return Chain.getOperand(0).getNode() == Callee.getNode();
}
/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
/// [Load chain]
/// ^
/// |
/// [Load]
/// ^ ^
/// | |
/// / \-
/// / |
/// [TokenFactor] [Op]
/// ^ ^
/// | |
/// \ /
/// \ /
/// [Store]
///
/// The fact that the store's chain operand != the load's chain will prevent
/// the (store (op (load))) instruction from being selected. We can transform
/// it to:
///
/// [Load chain]
/// ^
/// |
/// [TokenFactor]
/// ^
/// |
/// [Load]
/// ^ ^
/// | |
/// | \-
/// | |
/// | [Op]
/// | ^
/// | |
/// \ /
/// \ /
/// [Store]
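///
/// With the chains rewritten this way, a DAG computing "*p += x" can be
/// selected as a single read-modify-write instruction, e.g. "addl %reg,
/// (mem)", rather than separate load, add and store instructions.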
void X86DAGToDAGISel::PreprocessForRMW() {
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
E = CurDAG->allnodes_end(); I != E; ++I) {
if (I->getOpcode() == X86ISD::CALL) {
/// Also try moving call address load from outside callseq_start to just
/// before the call to allow it to be folded.
///
/// [Load chain]
/// ^
/// |
/// [Load]
/// ^ ^
/// | |
/// / \--
/// / |
///[CALLSEQ_START] |
/// ^ |
/// | |
/// [LOAD/C2Reg] |
/// | |
/// \ /
/// \ /
/// [CALL]
SDValue Chain = I->getOperand(0);
SDValue Load = I->getOperand(1);
if (!isCalleeLoad(Load, Chain))
continue;
MoveBelowCallSeqStart(CurDAG, Load, SDValue(I, 0), Chain);
++NumLoadMoved;
continue;
}
if (!ISD::isNON_TRUNCStore(I))
continue;
SDValue Chain = I->getOperand(0);
if (Chain.getNode()->getOpcode() != ISD::TokenFactor)
continue;
SDValue N1 = I->getOperand(1);
SDValue N2 = I->getOperand(2);
if ((N1.getValueType().isFloatingPoint() &&
!N1.getValueType().isVector()) ||
!N1.hasOneUse())
continue;
bool RModW = false;
SDValue Load;
unsigned Opcode = N1.getNode()->getOpcode();
switch (Opcode) {
case ISD::ADD:
case ISD::MUL:
case ISD::AND:
case ISD::OR:
case ISD::XOR:
case ISD::ADDC:
case ISD::ADDE:
case ISD::VECTOR_SHUFFLE: {
SDValue N10 = N1.getOperand(0);
SDValue N11 = N1.getOperand(1);
RModW = isRMWLoad(N10, Chain, N2, Load);
if (!RModW)
RModW = isRMWLoad(N11, Chain, N2, Load);
break;
}
case ISD::SUB:
case ISD::SHL:
case ISD::SRA:
case ISD::SRL:
case ISD::ROTL:
case ISD::ROTR:
case ISD::SUBC:
case ISD::SUBE:
case X86ISD::SHLD:
case X86ISD::SHRD: {
SDValue N10 = N1.getOperand(0);
RModW = isRMWLoad(N10, Chain, N2, Load);
break;
}
}
if (RModW) {
MoveBelowTokenFactor(CurDAG, Load, SDValue(I, 0), Chain);
++NumLoadMoved;
}
}
}
/// PreprocessForFPConvert - Walk over the dag, lowering fpround and fpextend
/// nodes that target the FP stack into store and load pairs through the
/// stack. This is a gross hack. We would like to simply mark these as being
/// illegal, but if we do that, legalize produces them when it expands calls
/// and then expands them in the same legalize pass. We would like dag combine
/// to be able to hack on them between the call expansion and the node
/// legalization. As such, this pass basically does "really late" legalization
/// of these nodes inline with the X86 isel pass.
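///
/// For example, an FP_ROUND from f64 (on the FP stack) to f32 (in SSE) is
/// rewritten here as an f32 truncstore to a stack temporary followed by an
/// f32 load, a sequence both units can implement directly (fstps + movss).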
void X86DAGToDAGISel::PreprocessForFPConvert() {
for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
E = CurDAG->allnodes_end(); I != E; ) {
SDNode *N = I++; // Advance the iterator now to avoid invalidation issues.
if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
continue;
// If the source and destination are SSE registers, then this is a legal
// conversion that should not be lowered.
MVT SrcVT = N->getOperand(0).getValueType();
MVT DstVT = N->getValueType(0);
bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
if (SrcIsSSE && DstIsSSE)
continue;
if (!SrcIsSSE && !DstIsSSE) {
// If this is an FPStack extension, it is a noop.
if (N->getOpcode() == ISD::FP_EXTEND)
continue;
// If this is a value-preserving FPStack truncation, it is a noop.
if (N->getConstantOperandVal(1))
continue;
}
// Here we could have an FP stack truncation or an FPStack <-> SSE convert.
// FPStack has extload and truncstore. SSE can fold direct loads into other
// operations. Based on this, decide what we want to do.
MVT MemVT;
if (N->getOpcode() == ISD::FP_ROUND)
MemVT = DstVT; // FP_ROUND must use DstVT, we can't do a 'trunc load'.
else
MemVT = SrcIsSSE ? SrcVT : DstVT;
SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
// FIXME: optimize the case where the src/dest is a load or store?
SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(),
N->getOperand(0),
MemTmp, NULL, 0, MemVT);
SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp,
NULL, 0, MemVT);
// We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
// extload we created. This will cause general havoc on the dag because
// anything below the conversion could be folded into other existing nodes.
// To avoid invalidating 'I', back it up to the convert node.
--I;
CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
// Now that we did that, the node is dead. Increment the iterator to the
// next node to process, then delete N.
++I;
CurDAG->DeleteNode(N);
}
}
/// InstructionSelect - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelect() {
CurBB = BB; // BB can change as a result of isel.
const Function *F = CurDAG->getMachineFunction().getFunction();
OptForSize = F->hasFnAttr(Attribute::OptimizeForSize);
DEBUG(BB->dump());
if (!Fast)
PreprocessForRMW();
// FIXME: This should only happen when not -fast.
PreprocessForFPConvert();
// Codegen the basic block.
#ifndef NDEBUG
DOUT << "===== Instruction selection begins:\n";
Indent = 0;
#endif
SelectRoot(*CurDAG);
#ifndef NDEBUG
DOUT << "===== Instruction selection ends:\n";
#endif
CurDAG->RemoveDeadNodes();
}
/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
MachineFrameInfo *MFI) {
const TargetInstrInfo *TII = TM.getInstrInfo();
if (Subtarget->isTargetCygMing())
BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}
void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
// If this is main, emit special code for main.
MachineBasicBlock *BB = MF.begin();
if (Fn.hasExternalLinkage() && Fn.getName() == "main")
EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}
/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
bool isRoot, unsigned Depth) {
bool is64Bit = Subtarget->is64Bit();
DOUT << "MatchAddress: "; DEBUG(AM.dump());
// Limit recursion.
if (Depth > 5)
return MatchAddressBase(N, AM, isRoot, Depth);
// RIP relative addressing: %rip + 32-bit displacement!
if (AM.isRIPRel) {
if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
if (!is64Bit || isInt32(AM.Disp + Val)) {
AM.Disp += Val;
return false;
}
}
return true;
}
switch (N.getOpcode()) {
default: break;
case ISD::Constant: {
uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
if (!is64Bit || isInt32(AM.Disp + Val)) {
AM.Disp += Val;
return false;
}
break;
}
case X86ISD::Wrapper: {
DOUT << "Wrapper: 64bit " << is64Bit;
DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
// Under X86-64 non-small code model, GV (and friends) are 64-bits.
// Also, base and index reg must be 0 in order to use rip as base.
if (is64Bit && (TM.getCodeModel() != CodeModel::Small ||
AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
break;
if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
break;
// If the value is available in a register but both the base and index
// components have already been picked, we can't fit that register into the
// addressing mode. Duplicate the GlobalAddress or ConstantPool as the
// displacement instead.
{
SDValue N0 = N.getOperand(0);
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
uint64_t Offset = G->getOffset();
if (!is64Bit || isInt32(AM.Disp + Offset)) {
GlobalValue *GV = G->getGlobal();
AM.GV = GV;
AM.Disp += Offset;
AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
return false;
}
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
uint64_t Offset = CP->getOffset();
if (!is64Bit || isInt32(AM.Disp + Offset)) {
AM.CP = CP->getConstVal();
AM.Align = CP->getAlignment();
AM.Disp += Offset;
AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
return false;
}
} else if (ExternalSymbolSDNode *S =dyn_cast<ExternalSymbolSDNode>(N0)) {
AM.ES = S->getSymbol();
AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
return false;
} else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
AM.JT = J->getIndex();
AM.isRIPRel = TM.symbolicAddressesAreRIPRel();
return false;
}
}
break;
}
case ISD::FrameIndex:
if (AM.BaseType == X86ISelAddressMode::RegBase
&& AM.Base.Reg.getNode() == 0) {
AM.BaseType = X86ISelAddressMode::FrameIndexBase;
AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
return false;
}
break;
case ISD::SHL:
if (AM.IndexReg.getNode() != 0 || AM.Scale != 1 || AM.isRIPRel)
break;
if (ConstantSDNode
*CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
unsigned Val = CN->getZExtValue();
if (Val == 1 || Val == 2 || Val == 3) {
AM.Scale = 1 << Val;
SDValue ShVal = N.getNode()->getOperand(0);
// Okay, we know that we have a scale by now. However, if the scaled
// value is an add of something and a constant, we can fold the
// constant into the disp field here.
if (ShVal.getNode()->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
isa<ConstantSDNode>(ShVal.getNode()->getOperand(1))) {
AM.IndexReg = ShVal.getNode()->getOperand(0);
ConstantSDNode *AddVal =
cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
uint64_t Disp = AM.Disp + (AddVal->getZExtValue() << Val);
if (!is64Bit || isInt32(Disp))
AM.Disp = Disp;
else
AM.IndexReg = ShVal;
} else {
AM.IndexReg = ShVal;
}
return false;
}
break;
}
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI:
// A mul_lohi where we need the low part can be folded as a plain multiply.
if (N.getResNo() != 0) break;
// FALL THROUGH
case ISD::MUL:
// X*[3,5,9] -> X+X*[2,4,8]
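// e.g. "x*5" matches with base = index = x and scale = 4, which can be
// selected as "leal (%reg,%reg,4), %dst".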
if (AM.BaseType == X86ISelAddressMode::RegBase &&
AM.Base.Reg.getNode() == 0 &&
AM.IndexReg.getNode() == 0 &&
!AM.isRIPRel) {
if (ConstantSDNode
*CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
CN->getZExtValue() == 9) {
AM.Scale = unsigned(CN->getZExtValue())-1;
SDValue MulVal = N.getNode()->getOperand(0);
SDValue Reg;
// Okay, we know that we have a scale by now. However, if the scaled
// value is an add of something and a constant, we can fold the
// constant into the disp field here.
if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
Reg = MulVal.getNode()->getOperand(0);
ConstantSDNode *AddVal =
cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
uint64_t Disp = AM.Disp + AddVal->getZExtValue() *
CN->getZExtValue();
if (!is64Bit || isInt32(Disp))
AM.Disp = Disp;
else
Reg = N.getNode()->getOperand(0);
} else {
Reg = N.getNode()->getOperand(0);
}
AM.IndexReg = AM.Base.Reg = Reg;
return false;
}
}
break;
case ISD::ADD:
{
X86ISelAddressMode Backup = AM;
if (!MatchAddress(N.getNode()->getOperand(0), AM, false, Depth+1) &&
!MatchAddress(N.getNode()->getOperand(1), AM, false, Depth+1))
return false;
AM = Backup;
if (!MatchAddress(N.getNode()->getOperand(1), AM, false, Depth+1) &&
!MatchAddress(N.getNode()->getOperand(0), AM, false, Depth+1))
return false;
AM = Backup;
}
break;
case ISD::OR:
// Handle "X | C" as "X + C" iff X is known to have C bits clear.
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
X86ISelAddressMode Backup = AM;
uint64_t Offset = CN->getSExtValue();
// Start with the LHS as an addr mode.
if (!MatchAddress(N.getOperand(0), AM, false) &&
// Address could not have picked a GV address for the displacement.
AM.GV == NULL &&
// On x86-64, the resultant disp must fit in 32-bits.
(!is64Bit || isInt32(AM.Disp + Offset)) &&
// Check to see if the LHS & C is zero.
CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getAPIntValue())) {
AM.Disp += Offset;
return false;
}
AM = Backup;
}
break;
case ISD::AND: {
// Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode.
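// e.g. "(x << 2) & 60" becomes "(x & 15) << 2", and the resulting shift
// folds into the address as a scale of 4.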
SDValue Shift = N.getOperand(0);
if (Shift.getOpcode() != ISD::SHL) break;
// Scale must not be used already.
if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
// Not when RIP is used as the base.
if (AM.isRIPRel) break;
ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
if (!C1 || !C2) break;
// Not likely to be profitable if either the AND or SHIFT node has more
// than one use (unless all uses are for address computation). Besides,
// the isel mechanism requires their node ids to be reused.
if (!N.hasOneUse() || !Shift.hasOneUse())
break;
// Verify that the shift amount is something we can fold.
unsigned ShiftCst = C1->getZExtValue();
if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
break;
// Get the new AND mask, this folds to a constant.
SDValue X = Shift.getOperand(0);
SDValue NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
SDValue(C2, 0), SDValue(C1, 0));
SDValue NewAND = CurDAG->getNode(ISD::AND, N.getValueType(), X, NewANDMask);
SDValue NewSHIFT = CurDAG->getNode(ISD::SHL, N.getValueType(),
NewAND, SDValue(C1, 0));
// Insert the new nodes into the topological ordering.
if (C1->getNodeId() > X.getNode()->getNodeId()) {
CurDAG->RepositionNode(X.getNode(), C1);
C1->setNodeId(X.getNode()->getNodeId());
}
if (NewANDMask.getNode()->getNodeId() == -1 ||
NewANDMask.getNode()->getNodeId() > X.getNode()->getNodeId()) {
CurDAG->RepositionNode(X.getNode(), NewANDMask.getNode());
NewANDMask.getNode()->setNodeId(X.getNode()->getNodeId());
}
if (NewAND.getNode()->getNodeId() == -1 ||
NewAND.getNode()->getNodeId() > Shift.getNode()->getNodeId()) {
CurDAG->RepositionNode(Shift.getNode(), NewAND.getNode());
NewAND.getNode()->setNodeId(Shift.getNode()->getNodeId());
}
if (NewSHIFT.getNode()->getNodeId() == -1 ||
NewSHIFT.getNode()->getNodeId() > N.getNode()->getNodeId()) {
CurDAG->RepositionNode(N.getNode(), NewSHIFT.getNode());
NewSHIFT.getNode()->setNodeId(N.getNode()->getNodeId());
}
CurDAG->ReplaceAllUsesWith(N, NewSHIFT);
AM.Scale = 1 << ShiftCst;
AM.IndexReg = NewAND;
return false;
}
}
return MatchAddressBase(N, AM, isRoot, Depth);
}
/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM,
bool isRoot, unsigned Depth) {
// Is the base register already occupied?
if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
// If so, check to see if the scale index register is set.
if (AM.IndexReg.getNode() == 0 && !AM.isRIPRel) {
AM.IndexReg = N;
AM.Scale = 1;
return false;
}
// Otherwise, we cannot select it.
return true;
}
// Default, generate it as a register.
AM.BaseType = X86ISelAddressMode::RegBase;
AM.Base.Reg = N;
return false;
}
/// SelectAddr - returns true if it is able to pattern match an addressing mode.
/// It returns the operands which make up the maximal addressing mode it can
/// match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp) {
X86ISelAddressMode AM;
if (MatchAddress(N, AM))
return false;
MVT VT = N.getValueType();
if (AM.BaseType == X86ISelAddressMode::RegBase) {
if (!AM.Base.Reg.getNode())
AM.Base.Reg = CurDAG->getRegister(0, VT);
}
if (!AM.IndexReg.getNode())
AM.IndexReg = CurDAG->getRegister(0, VT);
getAddressOperands(AM, Base, Scale, Index, Disp);
return true;
}
/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
/// match a load whose top elements are either undef or zeros. The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
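/// Matching such a load lets it fold directly into a scalar SSE operation
/// (e.g. the memory operand of addss or addsd), since only the low element
/// of the vector is actually used.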
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
SDValue N, SDValue &Base,
SDValue &Scale, SDValue &Index,
SDValue &Disp, SDValue &InChain,
SDValue &OutChain) {
if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
InChain = N.getOperand(0).getValue(1);
if (ISD::isNON_EXTLoad(InChain.getNode()) &&
InChain.getValue(0).hasOneUse() &&
N.hasOneUse() &&
IsLegalAndProfitableToFold(N.getNode(), Pred.getNode(), Op.getNode())) {
LoadSDNode *LD = cast<LoadSDNode>(InChain);
if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
return false;
OutChain = LD->getChain();
return true;
}
}
// Also handle the case where we explicitly require zeros in the top
// elements. This is a vector shuffle from the zero vector.
if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
// Check to see if the top elements are all zeros (or bitcast of zeros).
N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
N.getOperand(0).getNode()->hasOneUse() &&
ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
N.getOperand(0).getOperand(0).hasOneUse()) {
// Okay, this is a zero extending load. Fold it.
LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
return false;
OutChain = LD->getChain();
InChain = SDValue(LD, 1);
return true;
}
return false;
}
/// SelectLEAAddr - It calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost-effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp) {
X86ISelAddressMode AM;
if (MatchAddress(N, AM))
return false;
MVT VT = N.getValueType();
unsigned Complexity = 0;
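// Crude cost model: count how many address components the match uses.
// The LEA form is only chosen when the folded address is complex enough
// (Complexity > 2 below) to beat plain adds and shifts.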
if (AM.BaseType == X86ISelAddressMode::RegBase)
if (AM.Base.Reg.getNode())
Complexity = 1;
else
AM.Base.Reg = CurDAG->getRegister(0, VT);
else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
Complexity = 4;
if (AM.IndexReg.getNode())
Complexity++;
else
AM.IndexReg = CurDAG->getRegister(0, VT);
// Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or a
// simple shift.
if (AM.Scale > 1)
Complexity++;
// FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
// into an LEA. This was determined by some experimentation but is by no means
// optimal (especially for code size considerations). LEA is nice because of
// its three-address nature. Tweak the cost function again when we can run
// convertToThreeAddress() at register allocation time.
if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
// For X86-64, we should always use lea to materialize RIP relative
// addresses.
if (Subtarget->is64Bit())
Complexity = 4;
else
Complexity += 2;
}
if (AM.Disp && (AM.Base.Reg.getNode() || AM.IndexReg.getNode()))
Complexity++;
if (Complexity > 2) {
getAddressOperands(AM, Base, Scale, Index, Disp);
return true;
}
return false;
}
bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
SDValue &Base, SDValue &Scale,
SDValue &Index, SDValue &Disp) {
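// Fold only plain (non-extending) loads with a single use, and only when the
// target says folding here is legal and profitable; operand 1 of the load
// node is its base pointer.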
if (ISD::isNON_EXTLoad(N.getNode()) &&
N.hasOneUse() &&
IsLegalAndProfitableToFold(N.getNode(), P.getNode(), P.getNode()))
return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
return false;
}
/// getGlobalBaseReg - Return an SDNode that returns the value of
/// the global base register. Output instructions required to
/// initialize the global base register, if necessary.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
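// The PIC base register is created lazily, once per function, by the
// instruction-info helper; here it is simply wrapped as a register node.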
MachineFunction *MF = CurBB->getParent();
unsigned GlobalBaseReg = TM.getInstrInfo()->getGlobalBaseReg(MF);
return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
}
static SDNode *FindCallStartFromCall(SDNode *Node) {
if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
assert(Node->getOperand(0).getValueType() == MVT::Other &&
"Node doesn't have a token chain argument!");
return FindCallStartFromCall(Node->getOperand(0).getNode());
}
/// getTruncateTo8Bit - return an SDNode that implements a subreg based
/// truncate of the specified operand to i8. This can be done with tablegen,
/// except that this code uses MVT::Flag in a tricky way that happens to
/// improve scheduling in some cases.
SDNode *X86DAGToDAGISel::getTruncateTo8Bit(SDValue N0) {
assert(!Subtarget->is64Bit() &&
"getTruncateTo8Bit is only needed on x86-32!");
SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
// Ensure that the source register has an 8-bit subreg on 32-bit targets
unsigned Opc;
MVT N0VT = N0.getValueType();
switch (N0VT.getSimpleVT()) {
default: assert(0 && "Unknown truncate!");
case MVT::i16:
Opc = X86::MOV16to16_;
break;
case MVT::i32:
Opc = X86::MOV32to32_;
break;
}
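// These pseudos copy the value into a register class whose members all have
// an addressable low byte on x86-32, which is what makes the EXTRACT_SUBREG
// below safe.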
// The use of MVT::Flag here is not strictly accurate, but it helps
// scheduling in some cases.
N0 = SDValue(CurDAG->getTargetNode(Opc, N0VT, MVT::Flag, N0), 0);
return CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
MVT::i8, N0, SRIdx, N0.getValue(1));
}
SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
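// Operand layout of the 64-bit atomic pseudos: chain, address, the data
// operand split into 32-bit halves, then the memoperand (operand 4).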
SDValue Chain = Node->getOperand(0);
SDValue In1 = Node->getOperand(1);
SDValue In2L = Node->getOperand(2);
SDValue In2H = Node->getOperand(3);
SDValue Tmp0, Tmp1, Tmp2, Tmp3;
if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3))
return NULL;
SDValue LSI = Node->getOperand(4); // MemOperand
const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, In2L, In2H, LSI, Chain };
return CurDAG->getTargetNode(Opc, MVT::i32, MVT::i32, MVT::Other, Ops, 8);
}
SDNode *X86DAGToDAGISel::Select(SDValue N) {
SDNode *Node = N.getNode();
MVT NVT = Node->getValueType(0);
unsigned Opc, MOpc;
unsigned Opcode = Node->getOpcode();
#ifndef NDEBUG
DOUT << std::string(Indent, ' ') << "Selecting: ";
DEBUG(Node->dump(CurDAG));
DOUT << "\n";
Indent += 2;
#endif
if (Node->isMachineOpcode()) {
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "== ";
DEBUG(Node->dump(CurDAG));
DOUT << "\n";
Indent -= 2;
#endif
return NULL; // Already selected.
}
switch (Opcode) {
default: break;
case X86ISD::GlobalBaseReg:
return getGlobalBaseReg();
case X86ISD::ATOMOR64_DAG:
return SelectAtomic64(Node, X86::ATOMOR6432);
case X86ISD::ATOMXOR64_DAG:
return SelectAtomic64(Node, X86::ATOMXOR6432);
case X86ISD::ATOMADD64_DAG:
return SelectAtomic64(Node, X86::ATOMADD6432);
case X86ISD::ATOMSUB64_DAG:
return SelectAtomic64(Node, X86::ATOMSUB6432);
case X86ISD::ATOMNAND64_DAG:
return SelectAtomic64(Node, X86::ATOMNAND6432);
case X86ISD::ATOMAND64_DAG:
return SelectAtomic64(Node, X86::ATOMAND6432);
case X86ISD::ATOMSWAP64_DAG:
return SelectAtomic64(Node, X86::ATOMSWAP6432);
case ISD::SMUL_LOHI:
case ISD::UMUL_LOHI: {
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
bool isSigned = Opcode == ISD::SMUL_LOHI;
if (!isSigned)
switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
}
else
switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
}
unsigned LoReg, HiReg;
switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
}
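// One-operand MUL/IMUL implicitly reads LoReg and writes the double-width
// product into the fixed Hi:Lo register pair, so the operands and results
// must be wired up with explicit register copies.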
SDValue Tmp0, Tmp1, Tmp2, Tmp3;
bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
// Multiplication is commutative; if the RHS didn't fold as a load, try the LHS.
if (!foldedLoad) {
foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
if (foldedLoad)
std::swap(N0, N1);
}
SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg,
N0, SDValue()).getValue(1);
if (foldedLoad) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
SDNode *CNode =
CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
InFlag = SDValue(CNode, 1);
// Update the chain.
ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
} else {
InFlag =
SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
}
// Copy the low half of the result, if it is needed.
if (!N.getValue(0).use_empty()) {
SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
LoReg, NVT, InFlag);
InFlag = Result.getValue(2);
ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
DEBUG(Result.getNode()->dump(CurDAG));
DOUT << "\n";
#endif
}
// Copy the high half of the result, if it is needed.
if (!N.getValue(1).use_empty()) {
SDValue Result;
if (HiReg == X86::AH && Subtarget->is64Bit()) {
// Prevent use of AH in a REX instruction by referencing AX instead.
// Shift it down 8 bits.
Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
X86::AX, MVT::i16, InFlag);
InFlag = Result.getValue(2);
Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
CurDAG->getTargetConstant(8, MVT::i8)), 0);
// Then truncate it down to i8.
SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
MVT::i8, Result, SRIdx), 0);
} else {
Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
HiReg, NVT, InFlag);
InFlag = Result.getValue(2);
}
ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
DEBUG(Result.getNode()->dump(CurDAG));
DOUT << "\n";
#endif
}
#ifndef NDEBUG
Indent -= 2;
#endif
return NULL;
}
case ISD::SDIVREM:
case ISD::UDIVREM: {
SDValue N0 = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
bool isSigned = Opcode == ISD::SDIVREM;
if (!isSigned)
switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
}
else
switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
}
unsigned LoReg, HiReg;
unsigned ClrOpcode, SExtOpcode;
switch (NVT.getSimpleVT()) {
default: assert(0 && "Unsupported VT!");
case MVT::i8:
LoReg = X86::AL; HiReg = X86::AH;
ClrOpcode = 0;
SExtOpcode = X86::CBW;
break;
case MVT::i16:
LoReg = X86::AX; HiReg = X86::DX;
ClrOpcode = X86::MOV16r0;
SExtOpcode = X86::CWD;
break;
case MVT::i32:
LoReg = X86::EAX; HiReg = X86::EDX;
ClrOpcode = X86::MOV32r0;
SExtOpcode = X86::CDQ;
break;
case MVT::i64:
LoReg = X86::RAX; HiReg = X86::RDX;
ClrOpcode = X86::MOV64r0;
SExtOpcode = X86::CQO;
break;
}
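// DIV/IDIV take the dividend in the fixed Hi:Lo register pair and leave
// the quotient in LoReg and the remainder in HiReg.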
SDValue Tmp0, Tmp1, Tmp2, Tmp3;
bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
SDValue InFlag;
if (NVT == MVT::i8 && !isSigned) {
// Special case for div8, just use a move with zero extension to AX to
// clear the upper 8 bits (AH).
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
Move =
SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
Ops, 5), 0);
Chain = Move.getValue(1);
ReplaceUses(N0.getValue(1), Chain);
} else {
Move =
SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
Chain = CurDAG->getEntryNode();
}
Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDValue());
InFlag = Chain.getValue(1);
} else {
InFlag =
CurDAG->getCopyToReg(CurDAG->getEntryNode(),
LoReg, N0, SDValue()).getValue(1);
if (isSigned) {
// Sign extend the low part into the high part.
InFlag =
SDValue(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
} else {
// Zero out the high part, effectively zero extending the input.
SDValue ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg,
ClrNode, InFlag).getValue(1);
}
}
if (foldedLoad) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
SDNode *CNode =
CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
InFlag = SDValue(CNode, 1);
// Update the chain.
ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
} else {
InFlag =
SDValue(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
}
// Copy the division (low) result, if it is needed.
if (!N.getValue(0).use_empty()) {
SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
LoReg, NVT, InFlag);
InFlag = Result.getValue(2);
ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
DEBUG(Result.getNode()->dump(CurDAG));
DOUT << "\n";
#endif
}
// Copy the remainder (high) result, if it is needed.
if (!N.getValue(1).use_empty()) {
SDValue Result;
if (HiReg == X86::AH && Subtarget->is64Bit()) {
// Prevent use of AH in a REX instruction by referencing AX instead.
// Shift it down 8 bits.
Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
X86::AX, MVT::i16, InFlag);
InFlag = Result.getValue(2);
Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
CurDAG->getTargetConstant(8, MVT::i8)), 0);
// Then truncate it down to i8.
SDValue SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
MVT::i8, Result, SRIdx), 0);
} else {
Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
HiReg, NVT, InFlag);
InFlag = Result.getValue(2);
}
ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
DEBUG(Result.getNode()->dump(CurDAG));
DOUT << "\n";
#endif
}
#ifndef NDEBUG
Indent -= 2;
#endif
return NULL;
}
case ISD::SIGN_EXTEND_INREG: {
MVT SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
if (SVT == MVT::i8 && !Subtarget->is64Bit()) {
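// On x86-32 not every GPR has an 8-bit subregister, so first copy into a
// class that does (via getTruncateTo8Bit), then sign-extend the low byte
// back out with MOVSX.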
SDValue N0 = Node->getOperand(0);
SDValue TruncOp = SDValue(getTruncateTo8Bit(N0), 0);
unsigned Opc = 0;
switch (NVT.getSimpleVT()) {
default: assert(0 && "Unknown sign_extend_inreg!");
case MVT::i16:
Opc = X86::MOVSX16rr8;
break;
case MVT::i32:
Opc = X86::MOVSX32rr8;
break;
}
SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
DEBUG(TruncOp.getNode()->dump(CurDAG));
DOUT << "\n";
DOUT << std::string(Indent-2, ' ') << "=> ";
DEBUG(ResNode->dump(CurDAG));
DOUT << "\n";
Indent -= 2;
#endif
return ResNode;
}
break;
}
case ISD::TRUNCATE: {
if (NVT == MVT::i8 && !Subtarget->is64Bit()) {
SDValue Input = Node->getOperand(0);
SDNode *ResNode = getTruncateTo8Bit(Input);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
DEBUG(ResNode->dump(CurDAG));
DOUT << "\n";
Indent -= 2;
#endif
return ResNode;
}
break;
}
case ISD::DECLARE: {
// Handle DECLARE nodes here because the second operand may have been
// wrapped in X86ISD::Wrapper.
SDValue Chain = Node->getOperand(0);
SDValue N1 = Node->getOperand(1);
SDValue N2 = Node->getOperand(2);
FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(N1);
if (!FINode)
break;
if (N2.getOpcode() == ISD::ADD &&
N2.getOperand(0).getOpcode() == X86ISD::GlobalBaseReg)
N2 = N2.getOperand(1);
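// Under PIC the address is (add GlobalBaseReg, Wrapper(GV)); the
// base-register add was stripped above so only the Wrapper remains.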
if (N2.getOpcode() != X86ISD::Wrapper)
break;
GlobalAddressSDNode *GVNode = dyn_cast<GlobalAddressSDNode>(N2.getOperand(0));
if (!GVNode)
break;
SDValue Tmp1 = CurDAG->getTargetFrameIndex(FINode->getIndex(),
TLI.getPointerTy());
SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GVNode->getGlobal(),
TLI.getPointerTy());
SDValue Ops[] = { Tmp1, Tmp2, Chain };
return CurDAG->getTargetNode(TargetInstrInfo::DECLARE,
MVT::Other, Ops, 3);
}
}
SDNode *ResNode = SelectCode(N);
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
if (ResNode == NULL || ResNode == N.getNode())
DEBUG(N.getNode()->dump(CurDAG));
else
DEBUG(ResNode->dump(CurDAG));
DOUT << "\n";
Indent -= 2;
#endif
return ResNode;
}
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
std::vector<SDValue> &OutOps) {
SDValue Op0, Op1, Op2, Op3;
switch (ConstraintCode) {
case 'o': // offsetable ??
case 'v': // not offsetable ??
default: return true;
case 'm': // memory
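// Memory constraint: reuse the normal address matcher and append the four
// resulting operands (base, scale, index, disp) below.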
if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
return true;
break;
}
OutOps.push_back(Op0);
OutOps.push_back(Op1);
OutOps.push_back(Op2);
OutOps.push_back(Op3);
return false;
}
/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
return new X86DAGToDAGISel(TM, Fast);
}