Renaming ISD::BIT_CONVERT to ISD::BITCAST to better reflect the LLVM IR concept.

llvm-svn: 119990
This commit is contained in:
Wesley Peck 2010-11-23 03:31:01 +00:00
parent 4329e078ac
commit 527da1b6e2
30 changed files with 1142 additions and 1145 deletions

View File

@ -1825,7 +1825,7 @@ register to convert the floating-point value to an integer.
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) { static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
assert(Op.getValueType() == MVT::i32); assert(Op.getValueType() == MVT::i32);
Op = DAG.getNode(SPISD::FTOI, MVT::f32, Op.getOperand(0)); Op = DAG.getNode(SPISD::FTOI, MVT::f32, Op.getOperand(0));
return DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op); return DAG.getNode(ISD::BITCAST, MVT::i32, Op);
} }
</pre> </pre>
</div> </div>

View File

@ -274,11 +274,11 @@ namespace ISD {
/// IDX, which must be a multiple of the result vector length. /// IDX, which must be a multiple of the result vector length.
EXTRACT_SUBVECTOR, EXTRACT_SUBVECTOR,
/// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
/// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int /// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
/// values that indicate which value (or undef) each result element will /// values that indicate which value (or undef) each result element will
/// get. These constant ints are accessible through the /// get. These constant ints are accessible through the
/// ShuffleVectorSDNode class. This is quite similar to the Altivec /// ShuffleVectorSDNode class. This is quite similar to the Altivec
/// 'vperm' instruction, except that the indices must be constants and are /// 'vperm' instruction, except that the indices must be constants and are
/// in terms of the element size of VEC1/VEC2, not in terms of bytes. /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
VECTOR_SHUFFLE, VECTOR_SHUFFLE,
@ -399,14 +399,14 @@ namespace ISD {
/// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type. /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
FP_EXTEND, FP_EXTEND,
// BIT_CONVERT - This operator converts between integer, vector and FP // BITCAST - This operator converts between integer, vector and FP
// values, as if the value was stored to memory with one type and loaded // values, as if the value was stored to memory with one type and loaded
// from the same address with the other type (or equivalently for vector // from the same address with the other type (or equivalently for vector
// format conversions, etc). The source and result are required to have // format conversions, etc). The source and result are required to have
// the same bit size (e.g. f32 <-> i32). This can also be used for // the same bit size (e.g. f32 <-> i32). This can also be used for
// int-to-int or fp-to-fp conversions, but that is a noop, deleted by // int-to-int or fp-to-fp conversions, but that is a noop, deleted by
// getNode(). // getNode().
BIT_CONVERT, BITCAST,
// CONVERT_RNDSAT - This operator is used to support various conversions // CONVERT_RNDSAT - This operator is used to support various conversions
// between various types (float, signed, unsigned and vectors of those // between various types (float, signed, unsigned and vectors of those
@ -532,7 +532,7 @@ namespace ISD {
// SRCVALUE - This is a node type that holds a Value* that is used to // SRCVALUE - This is a node type that holds a Value* that is used to
// make reference to a value in the LLVM IR. // make reference to a value in the LLVM IR.
SRCVALUE, SRCVALUE,
// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to // MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
// reference metadata in the IR. // reference metadata in the IR.
MDNODE_SDNODE, MDNODE_SDNODE,

View File

@ -1,10 +1,10 @@
//===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===// //===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===//
// //
// The LLVM Compiler Infrastructure // The LLVM Compiler Infrastructure
// //
// This file is distributed under the University of Illinois Open Source // This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details. // License. See LICENSE.TXT for details.
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// //
// This file defines the target-independent interfaces used by SelectionDAG // This file defines the target-independent interfaces used by SelectionDAG
@ -123,10 +123,10 @@ def SDTFPRoundOp : SDTypeProfile<1, 1, [ // fround
def SDTFPExtendOp : SDTypeProfile<1, 1, [ // fextend def SDTFPExtendOp : SDTypeProfile<1, 1, [ // fextend
SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0> SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>
]>; ]>;
def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp
SDTCisFP<0>, SDTCisInt<1> SDTCisFP<0>, SDTCisInt<1>
]>; ]>;
def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int
SDTCisInt<0>, SDTCisFP<1> SDTCisInt<0>, SDTCisFP<1>
]>; ]>;
def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg
@ -138,7 +138,7 @@ def SDTSetCC : SDTypeProfile<1, 3, [ // setcc
SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT> SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
]>; ]>;
def SDTSelect : SDTypeProfile<1, 3, [ // select def SDTSelect : SDTypeProfile<1, 3, [ // select
SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3> SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>
]>; ]>;
@ -162,11 +162,11 @@ def SDTBrind : SDTypeProfile<0, 1, [ // brind
def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap
def SDTLoad : SDTypeProfile<1, 1, [ // load def SDTLoad : SDTypeProfile<1, 1, [ // load
SDTCisPtrTy<1> SDTCisPtrTy<1>
]>; ]>;
def SDTStore : SDTypeProfile<0, 2, [ // store def SDTStore : SDTypeProfile<0, 2, [ // store
SDTCisPtrTy<1> SDTCisPtrTy<1>
]>; ]>;
def SDTIStore : SDTypeProfile<1, 3, [ // indexed store def SDTIStore : SDTypeProfile<1, 3, [ // indexed store
@ -235,7 +235,7 @@ class SDPatternOperator;
// Selection DAG Node definitions. // Selection DAG Node definitions.
// //
class SDNode<string opcode, SDTypeProfile typeprof, class SDNode<string opcode, SDTypeProfile typeprof,
list<SDNodeProperty> props = [], string sdclass = "SDNode"> list<SDNodeProperty> props = [], string sdclass = "SDNode">
: SDPatternOperator { : SDPatternOperator {
string Opcode = opcode; string Opcode = opcode;
string SDClass = sdclass; string SDClass = sdclass;
@ -319,7 +319,7 @@ def subc : SDNode<"ISD::SUBC" , SDTIntBinOp,
[SDNPOutFlag]>; [SDNPOutFlag]>;
def sube : SDNode<"ISD::SUBE" , SDTIntBinOp, def sube : SDNode<"ISD::SUBE" , SDTIntBinOp,
[SDNPOutFlag, SDNPInFlag]>; [SDNPOutFlag, SDNPInFlag]>;
def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>; def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>;
def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>; def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>;
def ctlz : SDNode<"ISD::CTLZ" , SDTIntUnaryOp>; def ctlz : SDNode<"ISD::CTLZ" , SDTIntUnaryOp>;
@ -329,11 +329,11 @@ def sext : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>;
def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>; def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>; def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
def trunc : SDNode<"ISD::TRUNCATE" , SDTIntTruncOp>; def trunc : SDNode<"ISD::TRUNCATE" , SDTIntTruncOp>;
def bitconvert : SDNode<"ISD::BIT_CONVERT", SDTUnaryOp>; def bitconvert : SDNode<"ISD::BITCAST" , SDTUnaryOp>;
def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>; def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>;
def insertelt : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>; def insertelt : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>;
def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>; def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>;
def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>; def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>;
def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>; def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>;
@ -423,16 +423,16 @@ def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>; SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>;
def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT", def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>; SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;
// Nodes for intrinsics, you should use the intrinsic itself and let tblgen use // Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
// these internally. Don't reference these directly. // these internally. Don't reference these directly.
def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID", def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>, SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
[SDNPHasChain]>; [SDNPHasChain]>;
def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN", def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>,
[SDNPHasChain]>; [SDNPHasChain]>;
def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN", def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>; SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>;
// Do not use cvt directly. Use cvt forms below // Do not use cvt directly. Use cvt forms below

View File

@ -185,7 +185,7 @@ namespace {
SDValue visitANY_EXTEND(SDNode *N); SDValue visitANY_EXTEND(SDNode *N);
SDValue visitSIGN_EXTEND_INREG(SDNode *N); SDValue visitSIGN_EXTEND_INREG(SDNode *N);
SDValue visitTRUNCATE(SDNode *N); SDValue visitTRUNCATE(SDNode *N);
SDValue visitBIT_CONVERT(SDNode *N); SDValue visitBITCAST(SDNode *N);
SDValue visitBUILD_PAIR(SDNode *N); SDValue visitBUILD_PAIR(SDNode *N);
SDValue visitFADD(SDNode *N); SDValue visitFADD(SDNode *N);
SDValue visitFSUB(SDNode *N); SDValue visitFSUB(SDNode *N);
@ -229,7 +229,7 @@ namespace {
SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
unsigned HiOp); unsigned HiOp);
SDValue CombineConsecutiveLoads(SDNode *N, EVT VT); SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
SDValue ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, EVT); SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
SDValue BuildSDIV(SDNode *N); SDValue BuildSDIV(SDNode *N);
SDValue BuildUDIV(SDNode *N); SDValue BuildUDIV(SDNode *N);
SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL); SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
@ -273,15 +273,15 @@ namespace {
/// Run - runs the dag combiner on all nodes in the work list /// Run - runs the dag combiner on all nodes in the work list
void Run(CombineLevel AtLevel); void Run(CombineLevel AtLevel);
SelectionDAG &getDAG() const { return DAG; } SelectionDAG &getDAG() const { return DAG; }
/// getShiftAmountTy - Returns a type large enough to hold any valid /// getShiftAmountTy - Returns a type large enough to hold any valid
/// shift amount - before type legalization these can be huge. /// shift amount - before type legalization these can be huge.
EVT getShiftAmountTy() { EVT getShiftAmountTy() {
return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy(); return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
} }
/// isTypeLegal - This method returns true if we are running before type /// isTypeLegal - This method returns true if we are running before type
/// legalization or if the specified VT is legal. /// legalization or if the specified VT is legal.
bool isTypeLegal(const EVT &VT) { bool isTypeLegal(const EVT &VT) {
@ -634,7 +634,7 @@ bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
// Replace the old value with the new one. // Replace the old value with the new one.
++NodesCombined; ++NodesCombined;
DEBUG(dbgs() << "\nReplacing.2 "; DEBUG(dbgs() << "\nReplacing.2 ";
TLO.Old.getNode()->dump(&DAG); TLO.Old.getNode()->dump(&DAG);
dbgs() << "\nWith: "; dbgs() << "\nWith: ";
TLO.New.getNode()->dump(&DAG); TLO.New.getNode()->dump(&DAG);
@ -694,7 +694,7 @@ SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
unsigned ExtOpc = unsigned ExtOpc =
Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
return DAG.getNode(ExtOpc, dl, PVT, Op); return DAG.getNode(ExtOpc, dl, PVT, Op);
} }
} }
if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT)) if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
@ -978,7 +978,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
RV.getNode()->getOpcode() != ISD::DELETED_NODE && RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
"Node was deleted but visit returned new node!"); "Node was deleted but visit returned new node!");
DEBUG(dbgs() << "\nReplacing.3 "; DEBUG(dbgs() << "\nReplacing.3 ";
N->dump(&DAG); N->dump(&DAG);
dbgs() << "\nWith: "; dbgs() << "\nWith: ";
RV.getNode()->dump(&DAG); RV.getNode()->dump(&DAG);
@ -1057,7 +1057,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::ANY_EXTEND: return visitANY_EXTEND(N); case ISD::ANY_EXTEND: return visitANY_EXTEND(N);
case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N); case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N);
case ISD::TRUNCATE: return visitTRUNCATE(N); case ISD::TRUNCATE: return visitTRUNCATE(N);
case ISD::BIT_CONVERT: return visitBIT_CONVERT(N); case ISD::BITCAST: return visitBITCAST(N);
case ISD::BUILD_PAIR: return visitBUILD_PAIR(N); case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
case ISD::FADD: return visitFADD(N); case ISD::FADD: return visitFADD(N);
case ISD::FSUB: return visitFSUB(N); case ISD::FSUB: return visitFSUB(N);
@ -1228,7 +1228,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
} }
} }
} }
SDValue Result; SDValue Result;
// If we've changed things around then replace token factor. // If we've changed things around then replace token factor.
@ -1429,10 +1429,10 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if (N1.getOpcode() == ISD::AND) { if (N1.getOpcode() == ISD::AND) {
SDValue AndOp0 = N1.getOperand(0); SDValue AndOp0 = N1.getOperand(0);
ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1)); ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1));
unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0); unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
unsigned DestBits = VT.getScalarType().getSizeInBits(); unsigned DestBits = VT.getScalarType().getSizeInBits();
// (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x)) // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
// and similar xforms where the inner op is either ~0 or 0. // and similar xforms where the inner op is either ~0 or 0.
if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) { if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) {
@ -2269,8 +2269,8 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
if (ExtVT == LoadedVT && if (ExtVT == LoadedVT &&
(!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) { (!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT; EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
SDValue NewLoad = SDValue NewLoad =
DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(), DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
LN0->getChain(), LN0->getBasePtr(), LN0->getChain(), LN0->getBasePtr(),
LN0->getPointerInfo(), LN0->getPointerInfo(),
@ -2280,7 +2280,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
CombineTo(LN0, NewLoad, NewLoad.getValue(1)); CombineTo(LN0, NewLoad, NewLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked! return SDValue(N, 0); // Return N so it doesn't get rechecked!
} }
// Do not change the width of a volatile load. // Do not change the width of a volatile load.
// Do not generate loads of non-round integer types since these can // Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized). // be expensive (and would be wrong if the type is not byte sized).
@ -2304,7 +2304,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
} }
AddToWorkList(NewPtr.getNode()); AddToWorkList(NewPtr.getNode());
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT; EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
SDValue Load = SDValue Load =
DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(), DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
@ -3086,7 +3086,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0), return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
DAG.getConstant(c1 + c2, N1.getValueType())); DAG.getConstant(c1 + c2, N1.getValueType()));
} }
// fold (srl (shl x, c), c) -> (and x, cst2) // fold (srl (shl x, c), c) -> (and x, cst2)
if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 && if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
N0.getValueSizeInBits() <= 64) { N0.getValueSizeInBits() <= 64) {
@ -3094,7 +3094,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0), return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
DAG.getConstant(~0ULL >> ShAmt, VT)); DAG.getConstant(~0ULL >> ShAmt, VT));
} }
// fold (srl (anyextend x), c) -> (anyextend (srl x, c)) // fold (srl (anyextend x), c) -> (anyextend (srl x, c))
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
@ -3198,7 +3198,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
// brcond i32 %c ... // brcond i32 %c ...
// //
// into // into
// //
// %a = ... // %a = ...
// %b = and %a, 2 // %b = and %a, 2
// %c = setcc eq %b, 0 // %c = setcc eq %b, 0
@ -3626,7 +3626,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
N0.getOperand(0), N0.getOperand(1), N0.getOperand(0), N0.getOperand(1),
cast<CondCodeSDNode>(N0.getOperand(2))->get()), cast<CondCodeSDNode>(N0.getOperand(2))->get()),
NegOne, DAG.getConstant(0, VT)); NegOne, DAG.getConstant(0, VT));
} }
// fold (sext x) -> (zext x) if the sign bit is known zero. // fold (sext x) -> (zext x) if the sign bit is known zero.
if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) && if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
@ -4104,7 +4104,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0) if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
return SDValue(); return SDValue();
} }
// If the shift amount is larger than the input type then we're not // If the shift amount is larger than the input type then we're not
// accessing any of the loaded bytes. If the load was a zextload/extload // accessing any of the loaded bytes. If the load was a zextload/extload
// then the result of the shift+trunc is zero/undef (handled elsewhere). // then the result of the shift+trunc is zero/undef (handled elsewhere).
@ -4112,7 +4112,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
// of the extended byte. This is not worth optimizing for. // of the extended byte. This is not worth optimizing for.
if (ShAmt >= VT.getSizeInBits()) if (ShAmt >= VT.getSizeInBits())
return SDValue(); return SDValue();
} }
} }
@ -4379,7 +4379,7 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
return SDValue(); return SDValue();
} }
SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) { SDValue DAGCombiner::visitBITCAST(SDNode *N) {
SDValue N0 = N->getOperand(0); SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0); EVT VT = N->getValueType(0);
@ -4403,12 +4403,12 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
assert(!DestEltVT.isVector() && assert(!DestEltVT.isVector() &&
"Element type of vector ValueType must not be vector!"); "Element type of vector ValueType must not be vector!");
if (isSimple) if (isSimple)
return ConstantFoldBIT_CONVERTofBUILD_VECTOR(N0.getNode(), DestEltVT); return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
} }
// If the input is a constant, let getNode fold it. // If the input is a constant, let getNode fold it.
if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) { if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
SDValue Res = DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, N0); SDValue Res = DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, N0);
if (Res.getNode() != N) { if (Res.getNode() != N) {
if (!LegalOperations || if (!LegalOperations ||
TLI.isOperationLegal(Res.getNode()->getOpcode(), VT)) TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
@ -4424,8 +4424,8 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
} }
// (conv (conv x, t1), t2) -> (conv x, t2) // (conv (conv x, t1), t2) -> (conv x, t2)
if (N0.getOpcode() == ISD::BIT_CONVERT) if (N0.getOpcode() == ISD::BITCAST)
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT,
N0.getOperand(0)); N0.getOperand(0));
// fold (conv (load x)) -> (load (conv*)x) // fold (conv (load x)) -> (load (conv*)x)
@ -4446,7 +4446,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
OrigAlign); OrigAlign);
AddToWorkList(N); AddToWorkList(N);
CombineTo(N0.getNode(), CombineTo(N0.getNode(),
DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(), DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
N0.getValueType(), Load), N0.getValueType(), Load),
Load.getValue(1)); Load.getValue(1));
return Load; return Load;
@ -4458,7 +4458,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
// This often reduces constant pool loads. // This often reduces constant pool loads.
if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) && if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) &&
N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) { N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) {
SDValue NewConv = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(), VT, SDValue NewConv = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(), VT,
N0.getOperand(0)); N0.getOperand(0));
AddToWorkList(NewConv.getNode()); AddToWorkList(NewConv.getNode());
@ -4481,7 +4481,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits(); unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth); EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
if (isTypeLegal(IntXVT)) { if (isTypeLegal(IntXVT)) {
SDValue X = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(), SDValue X = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
IntXVT, N0.getOperand(1)); IntXVT, N0.getOperand(1));
AddToWorkList(X.getNode()); AddToWorkList(X.getNode());
@ -4506,7 +4506,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
X, DAG.getConstant(SignBit, VT)); X, DAG.getConstant(SignBit, VT));
AddToWorkList(X.getNode()); AddToWorkList(X.getNode());
SDValue Cst = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(), SDValue Cst = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
VT, N0.getOperand(0)); VT, N0.getOperand(0));
Cst = DAG.getNode(ISD::AND, Cst.getDebugLoc(), VT, Cst = DAG.getNode(ISD::AND, Cst.getDebugLoc(), VT,
Cst, DAG.getConstant(~SignBit, VT)); Cst, DAG.getConstant(~SignBit, VT));
@ -4531,11 +4531,11 @@ SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
return CombineConsecutiveLoads(N, VT); return CombineConsecutiveLoads(N, VT);
} }
/// ConstantFoldBIT_CONVERTofBUILD_VECTOR - We know that BV is a build_vector /// ConstantFoldBITCASTofBUILD_VECTOR - We know that BV is a build_vector
/// node with Constant, ConstantFP or Undef operands. DstEltVT indicates the /// node with Constant, ConstantFP or Undef operands. DstEltVT indicates the
/// destination element value type. /// destination element value type.
SDValue DAGCombiner:: SDValue DAGCombiner::
ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) { ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
EVT SrcEltVT = BV->getValueType(0).getVectorElementType(); EVT SrcEltVT = BV->getValueType(0).getVectorElementType();
// If this is already the right type, we're done. // If this is already the right type, we're done.
@ -4553,10 +4553,10 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
// Due to the FP element handling below calling this routine recursively, // Due to the FP element handling below calling this routine recursively,
// we can end up with a scalar-to-vector node here. // we can end up with a scalar-to-vector node here.
if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR) if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT, return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
DAG.getNode(ISD::BIT_CONVERT, BV->getDebugLoc(), DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
DstEltVT, BV->getOperand(0))); DstEltVT, BV->getOperand(0)));
SmallVector<SDValue, 8> Ops; SmallVector<SDValue, 8> Ops;
for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) { for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
SDValue Op = BV->getOperand(i); SDValue Op = BV->getOperand(i);
@ -4564,7 +4564,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
// are promoted and implicitly truncated. Make that explicit here. // are promoted and implicitly truncated. Make that explicit here.
if (Op.getValueType() != SrcEltVT) if (Op.getValueType() != SrcEltVT)
Op = DAG.getNode(ISD::TRUNCATE, BV->getDebugLoc(), SrcEltVT, Op); Op = DAG.getNode(ISD::TRUNCATE, BV->getDebugLoc(), SrcEltVT, Op);
Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, BV->getDebugLoc(), Ops.push_back(DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
DstEltVT, Op)); DstEltVT, Op));
AddToWorkList(Ops.back().getNode()); AddToWorkList(Ops.back().getNode());
} }
@ -4580,7 +4580,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
// same sizes. // same sizes.
assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!"); assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits()); EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
BV = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, IntVT).getNode(); BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
SrcEltVT = IntVT; SrcEltVT = IntVT;
} }
@ -4589,10 +4589,10 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
if (DstEltVT.isFloatingPoint()) { if (DstEltVT.isFloatingPoint()) {
assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!"); assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits()); EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
SDNode *Tmp = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, TmpVT).getNode(); SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();
// Next, convert to FP elements of the same size. // Next, convert to FP elements of the same size.
return ConstantFoldBIT_CONVERTofBUILD_VECTOR(Tmp, DstEltVT); return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
} }
// Okay, we know the src/dst types are both integers of differing types. // Okay, we know the src/dst types are both integers of differing types.
@ -5068,7 +5068,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
// Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading // Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
// constant pool values. // constant pool values.
if (N0.getOpcode() == ISD::BIT_CONVERT && if (N0.getOpcode() == ISD::BITCAST &&
!VT.isVector() && !VT.isVector() &&
N0.getNode()->hasOneUse() && N0.getNode()->hasOneUse() &&
N0.getOperand(0).getValueType().isInteger()) { N0.getOperand(0).getValueType().isInteger()) {
@ -5078,7 +5078,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int, Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int,
DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT)); DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
AddToWorkList(Int.getNode()); AddToWorkList(Int.getNode());
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
VT, Int); VT, Int);
} }
} }
@ -5104,7 +5104,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
// Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading // Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
// constant pool values. // constant pool values.
if (N0.getOpcode() == ISD::BIT_CONVERT && N0.getNode()->hasOneUse() && if (N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
N0.getOperand(0).getValueType().isInteger() && N0.getOperand(0).getValueType().isInteger() &&
!N0.getOperand(0).getValueType().isVector()) { !N0.getOperand(0).getValueType().isVector()) {
SDValue Int = N0.getOperand(0); SDValue Int = N0.getOperand(0);
@ -5113,7 +5113,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int, Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int,
DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT)); DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
AddToWorkList(Int.getNode()); AddToWorkList(Int.getNode());
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
N->getValueType(0), Int); N->getValueType(0), Int);
} }
} }
@ -5160,7 +5160,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
// brcond i32 %c ... // brcond i32 %c ...
// //
// into // into
// //
// %a = ... // %a = ...
// %b = and i32 %a, 2 // %b = and i32 %a, 2
// %c = setcc eq %b, 0 // %c = setcc eq %b, 0
@ -5211,7 +5211,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
// Restore N1 if the above transformation doesn't match. // Restore N1 if the above transformation doesn't match.
N1 = N->getOperand(1); N1 = N->getOperand(1);
} }
// Transform br(xor(x, y)) -> br(x != y) // Transform br(xor(x, y)) -> br(x != y)
// Transform br(xor(xor(x,y), 1)) -> br (x == y) // Transform br(xor(xor(x,y), 1)) -> br (x == y)
if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) { if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) {
@ -5665,10 +5665,10 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
// Create token factor to keep old chain connected. // Create token factor to keep old chain connected.
SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
MVT::Other, Chain, ReplLoad.getValue(1)); MVT::Other, Chain, ReplLoad.getValue(1));
// Make sure the new and old chains are cleaned up. // Make sure the new and old chains are cleaned up.
AddToWorkList(Token.getNode()); AddToWorkList(Token.getNode());
// Replace uses with load result and token factor. Don't add users // Replace uses with load result and token factor. Don't add users
// to work list. // to work list.
return CombineTo(N, ReplLoad.getValue(0), Token, false); return CombineTo(N, ReplLoad.getValue(0), Token, false);
@ -5688,17 +5688,17 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
static std::pair<unsigned, unsigned> static std::pair<unsigned, unsigned>
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) { CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
std::pair<unsigned, unsigned> Result(0, 0); std::pair<unsigned, unsigned> Result(0, 0);
// Check for the structure we're looking for. // Check for the structure we're looking for.
if (V->getOpcode() != ISD::AND || if (V->getOpcode() != ISD::AND ||
!isa<ConstantSDNode>(V->getOperand(1)) || !isa<ConstantSDNode>(V->getOperand(1)) ||
!ISD::isNormalLoad(V->getOperand(0).getNode())) !ISD::isNormalLoad(V->getOperand(0).getNode()))
return Result; return Result;
// Check the chain and pointer. // Check the chain and pointer.
LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0)); LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer. if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
// The store should be chained directly to the load or be an operand of a // The store should be chained directly to the load or be an operand of a
// tokenfactor. // tokenfactor.
if (LD == Chain.getNode()) if (LD == Chain.getNode())
@ -5714,7 +5714,7 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
} }
if (!isOk) return Result; if (!isOk) return Result;
} }
// This only handles simple types. // This only handles simple types.
if (V.getValueType() != MVT::i16 && if (V.getValueType() != MVT::i16 &&
V.getValueType() != MVT::i32 && V.getValueType() != MVT::i32 &&
@ -5730,7 +5730,7 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
unsigned NotMaskTZ = CountTrailingZeros_64(NotMask); unsigned NotMaskTZ = CountTrailingZeros_64(NotMask);
if (NotMaskTZ & 7) return Result; // Must be multiple of a byte. if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
if (NotMaskLZ == 64) return Result; // All zero mask. if (NotMaskLZ == 64) return Result; // All zero mask.
// See if we have a continuous run of bits. If so, we have 0*1+0* // See if we have a continuous run of bits. If so, we have 0*1+0*
if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64) if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64)
return Result; return Result;
@ -5738,19 +5738,19 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
// Adjust NotMaskLZ down to be from the actual size of the int instead of i64. // Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
if (V.getValueType() != MVT::i64 && NotMaskLZ) if (V.getValueType() != MVT::i64 && NotMaskLZ)
NotMaskLZ -= 64-V.getValueSizeInBits(); NotMaskLZ -= 64-V.getValueSizeInBits();
unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8; unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
switch (MaskedBytes) { switch (MaskedBytes) {
case 1: case 1:
case 2: case 2:
case 4: break; case 4: break;
default: return Result; // All one mask, or 5-byte mask. default: return Result; // All one mask, or 5-byte mask.
} }
// Verify that the first bit starts at a multiple of mask so that the access // Verify that the first bit starts at a multiple of mask so that the access
// is aligned the same as the access width. // is aligned the same as the access width.
if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result; if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
Result.first = MaskedBytes; Result.first = MaskedBytes;
Result.second = NotMaskTZ/8; Result.second = NotMaskTZ/8;
return Result; return Result;
@ -5767,20 +5767,20 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
unsigned NumBytes = MaskInfo.first; unsigned NumBytes = MaskInfo.first;
unsigned ByteShift = MaskInfo.second; unsigned ByteShift = MaskInfo.second;
SelectionDAG &DAG = DC->getDAG(); SelectionDAG &DAG = DC->getDAG();
// Check to see if IVal is all zeros in the part being masked in by the 'or' // Check to see if IVal is all zeros in the part being masked in by the 'or'
// that uses this. If not, this is not a replacement. // that uses this. If not, this is not a replacement.
APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(), APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
ByteShift*8, (ByteShift+NumBytes)*8); ByteShift*8, (ByteShift+NumBytes)*8);
if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0; if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0;
// Check that it is legal on the target to do this. It is legal if the new // Check that it is legal on the target to do this. It is legal if the new
// VT we're shrinking to (i8/i16/i32) is legal or we're still before type // VT we're shrinking to (i8/i16/i32) is legal or we're still before type
// legalization. // legalization.
MVT VT = MVT::getIntegerVT(NumBytes*8); MVT VT = MVT::getIntegerVT(NumBytes*8);
if (!DC->isTypeLegal(VT)) if (!DC->isTypeLegal(VT))
return 0; return 0;
// Okay, we can do this! Replace the 'St' store with a store of IVal that is // Okay, we can do this! Replace the 'St' store with a store of IVal that is
// shifted by ByteShift and truncated down to NumBytes. // shifted by ByteShift and truncated down to NumBytes.
if (ByteShift) if (ByteShift)
@ -5795,19 +5795,19 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
StOffset = ByteShift; StOffset = ByteShift;
else else
StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes; StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
SDValue Ptr = St->getBasePtr(); SDValue Ptr = St->getBasePtr();
if (StOffset) { if (StOffset) {
Ptr = DAG.getNode(ISD::ADD, IVal->getDebugLoc(), Ptr.getValueType(), Ptr = DAG.getNode(ISD::ADD, IVal->getDebugLoc(), Ptr.getValueType(),
Ptr, DAG.getConstant(StOffset, Ptr.getValueType())); Ptr, DAG.getConstant(StOffset, Ptr.getValueType()));
NewAlign = MinAlign(NewAlign, StOffset); NewAlign = MinAlign(NewAlign, StOffset);
} }
// Truncate down to the new size. // Truncate down to the new size.
IVal = DAG.getNode(ISD::TRUNCATE, IVal->getDebugLoc(), VT, IVal); IVal = DAG.getNode(ISD::TRUNCATE, IVal->getDebugLoc(), VT, IVal);
++OpsNarrowed; ++OpsNarrowed;
return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr, return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
St->getPointerInfo().getWithOffset(StOffset), St->getPointerInfo().getWithOffset(StOffset),
false, false, NewAlign).getNode(); false, false, NewAlign).getNode();
} }
@ -5831,7 +5831,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
return SDValue(); return SDValue();
unsigned Opc = Value.getOpcode(); unsigned Opc = Value.getOpcode();
// If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
// is a byte mask indicating a consecutive number of bytes, check to see if // is a byte mask indicating a consecutive number of bytes, check to see if
// Y is known to provide just those bytes. If so, we try to replace the // Y is known to provide just those bytes. If so, we try to replace the
@ -5844,7 +5844,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad, if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
Value.getOperand(1), ST,this)) Value.getOperand(1), ST,this))
return SDValue(NewST, 0); return SDValue(NewST, 0);
// Or is commutative, so try swapping X and Y. // Or is commutative, so try swapping X and Y.
MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain); MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
if (MaskedLoad.first) if (MaskedLoad.first)
@ -5852,7 +5852,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
Value.getOperand(0), ST,this)) Value.getOperand(0), ST,this))
return SDValue(NewST, 0); return SDValue(NewST, 0);
} }
if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) || if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
Value.getOperand(1).getOpcode() != ISD::Constant) Value.getOperand(1).getOpcode() != ISD::Constant)
return SDValue(); return SDValue();
@ -5944,7 +5944,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
// If this is a store of a bit convert, store the input value if the // If this is a store of a bit convert, store the input value if the
// resultant store does not need a higher alignment than the original. // resultant store does not need a higher alignment than the original.
if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() && if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
ST->isUnindexed()) { ST->isUnindexed()) {
unsigned OrigAlign = ST->getAlignment(); unsigned OrigAlign = ST->getAlignment();
EVT SVT = Value.getOperand(0).getValueType(); EVT SVT = Value.getOperand(0).getValueType();
@ -6146,9 +6146,9 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
InVec.getValueType(), &Ops[0], Ops.size()); InVec.getValueType(), &Ops[0], Ops.size());
} }
// If the invec is an UNDEF and if EltNo is a constant, create a new // If the invec is an UNDEF and if EltNo is a constant, create a new
// BUILD_VECTOR with undef elements and the inserted element. // BUILD_VECTOR with undef elements and the inserted element.
if (!LegalOperations && InVec.getOpcode() == ISD::UNDEF && if (!LegalOperations && InVec.getOpcode() == ISD::UNDEF &&
isa<ConstantSDNode>(EltNo)) { isa<ConstantSDNode>(EltNo)) {
EVT VT = InVec.getValueType(); EVT VT = InVec.getValueType();
EVT EltVT = VT.getVectorElementType(); EVT EltVT = VT.getVectorElementType();
@ -6198,7 +6198,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
EVT ExtVT = VT.getVectorElementType(); EVT ExtVT = VT.getVectorElementType();
EVT LVT = ExtVT; EVT LVT = ExtVT;
if (InVec.getOpcode() == ISD::BIT_CONVERT) { if (InVec.getOpcode() == ISD::BITCAST) {
EVT BCVT = InVec.getOperand(0).getValueType(); EVT BCVT = InVec.getOperand(0).getValueType();
if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType())) if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
return SDValue(); return SDValue();
@ -6232,7 +6232,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt); int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1); InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
if (InVec.getOpcode() == ISD::BIT_CONVERT) if (InVec.getOpcode() == ISD::BITCAST)
InVec = InVec.getOperand(0); InVec = InVec.getOperand(0);
if (ISD::isNormalLoad(InVec.getNode())) { if (ISD::isNormalLoad(InVec.getNode())) {
LN0 = cast<LoadSDNode>(InVec); LN0 = cast<LoadSDNode>(InVec);
@ -6262,7 +6262,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
SDValue NewPtr = LN0->getBasePtr(); SDValue NewPtr = LN0->getBasePtr();
unsigned PtrOff = 0; unsigned PtrOff = 0;
if (Elt) { if (Elt) {
PtrOff = LVT.getSizeInBits() * Elt / 8; PtrOff = LVT.getSizeInBits() * Elt / 8;
EVT PtrType = NewPtr.getValueType(); EVT PtrType = NewPtr.getValueType();
@ -6339,7 +6339,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue(); unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
if (ExtIndex > VT.getVectorNumElements()) if (ExtIndex > VT.getVectorNumElements())
return SDValue(); return SDValue();
Mask.push_back(ExtIndex); Mask.push_back(ExtIndex);
continue; continue;
} }
@ -6396,7 +6396,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
// If this is a bit convert that changes the element type of the vector but // If this is a bit convert that changes the element type of the vector but
// not the number of vector elements, look through it. Be careful not to // not the number of vector elements, look through it. Be careful not to
// look though conversions that change things like v4f32 to v2f64. // look though conversions that change things like v4f32 to v2f64.
if (V->getOpcode() == ISD::BIT_CONVERT) { if (V->getOpcode() == ISD::BITCAST) {
SDValue ConvInput = V->getOperand(0); SDValue ConvInput = V->getOperand(0);
if (ConvInput.getValueType().isVector() && if (ConvInput.getValueType().isVector() &&
ConvInput.getValueType().getVectorNumElements() == NumElts) ConvInput.getValueType().getVectorNumElements() == NumElts)
@ -6494,7 +6494,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
SDValue LHS = N->getOperand(0); SDValue LHS = N->getOperand(0);
SDValue RHS = N->getOperand(1); SDValue RHS = N->getOperand(1);
if (N->getOpcode() == ISD::AND) { if (N->getOpcode() == ISD::AND) {
if (RHS.getOpcode() == ISD::BIT_CONVERT) if (RHS.getOpcode() == ISD::BITCAST)
RHS = RHS.getOperand(0); RHS = RHS.getOperand(0);
if (RHS.getOpcode() == ISD::BUILD_VECTOR) { if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
SmallVector<int, 8> Indices; SmallVector<int, 8> Indices;
@ -6522,9 +6522,9 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
DAG.getConstant(0, EltVT)); DAG.getConstant(0, EltVT));
SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(), SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
RVT, &ZeroOps[0], ZeroOps.size()); RVT, &ZeroOps[0], ZeroOps.size());
LHS = DAG.getNode(ISD::BIT_CONVERT, dl, RVT, LHS); LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]); SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Shuf); return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
} }
} }
@ -6643,7 +6643,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
if (LHS.getOpcode() != RHS.getOpcode() || if (LHS.getOpcode() != RHS.getOpcode() ||
!LHS.hasOneUse() || !RHS.hasOneUse()) !LHS.hasOneUse() || !RHS.hasOneUse())
return false; return false;
// If this is a load and the token chain is identical, replace the select // If this is a load and the token chain is identical, replace the select
// of two loads with a load through a select of the address to load from. // of two loads with a load through a select of the address to load from.
// This triggers in things like "select bool X, 10.0, 123.0" after the FP // This triggers in things like "select bool X, 10.0, 123.0" after the FP
@ -6651,7 +6651,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
if (LHS.getOpcode() == ISD::LOAD) { if (LHS.getOpcode() == ISD::LOAD) {
LoadSDNode *LLD = cast<LoadSDNode>(LHS); LoadSDNode *LLD = cast<LoadSDNode>(LHS);
LoadSDNode *RLD = cast<LoadSDNode>(RHS); LoadSDNode *RLD = cast<LoadSDNode>(RHS);
// Token chains must be identical. // Token chains must be identical.
if (LHS.getOperand(0) != RHS.getOperand(0) || if (LHS.getOperand(0) != RHS.getOperand(0) ||
// Do not let this transformation reduce the number of volatile loads. // Do not let this transformation reduce the number of volatile loads.
@ -6671,7 +6671,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
LLD->getPointerInfo().getAddrSpace() != 0 || LLD->getPointerInfo().getAddrSpace() != 0 ||
RLD->getPointerInfo().getAddrSpace() != 0) RLD->getPointerInfo().getAddrSpace() != 0)
return false; return false;
// Check that the select condition doesn't reach either load. If so, // Check that the select condition doesn't reach either load. If so,
// folding this will induce a cycle into the DAG. If not, this is safe to // folding this will induce a cycle into the DAG. If not, this is safe to
// xform, so create a select of the addresses. // xform, so create a select of the addresses.
@ -6694,7 +6694,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
(LLD->hasAnyUseOfValue(1) && (LLD->hasAnyUseOfValue(1) &&
(LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS)))) (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))))
return false; return false;
Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(), Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
LLD->getBasePtr().getValueType(), LLD->getBasePtr().getValueType(),
TheSelect->getOperand(0), TheSelect->getOperand(0),
@ -6742,7 +6742,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
ISD::CondCode CC, bool NotExtCompare) { ISD::CondCode CC, bool NotExtCompare) {
// (x ? y : y) -> y. // (x ? y : y) -> y.
if (N2 == N3) return N2; if (N2 == N3) return N2;
EVT VT = N2.getValueType(); EVT VT = N2.getValueType();
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode()); ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
@ -6778,7 +6778,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
return DAG.getNode(ISD::FABS, DL, VT, N3); return DAG.getNode(ISD::FABS, DL, VT, N3);
} }
} }
// Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)" // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)"
// where "tmp" is a constant pool entry containing an array with 1.0 and 2.0 // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
// in it. This is a win when the constant is not otherwise available because // in it. This is a win when the constant is not otherwise available because
@ -6801,7 +6801,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
}; };
const Type *FPTy = Elts[0]->getType(); const Type *FPTy = Elts[0]->getType();
const TargetData &TD = *TLI.getTargetData(); const TargetData &TD = *TLI.getTargetData();
// Create a ConstantArray of the two constants. // Create a ConstantArray of the two constants.
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts, 2); Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts, 2);
SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(), SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
@ -6813,7 +6813,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
SDValue Zero = DAG.getIntPtrConstant(0); SDValue Zero = DAG.getIntPtrConstant(0);
unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType()); unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
SDValue One = DAG.getIntPtrConstant(EltSize); SDValue One = DAG.getIntPtrConstant(EltSize);
SDValue Cond = DAG.getSetCC(DL, SDValue Cond = DAG.getSetCC(DL,
TLI.getSetCCResultType(N0.getValueType()), TLI.getSetCCResultType(N0.getValueType()),
N0, N1, CC); N0, N1, CC);
@ -6826,7 +6826,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
false, Alignment); false, Alignment);
} }
} }
// Check to see if we can perform the "gzip trick", transforming // Check to see if we can perform the "gzip trick", transforming
// (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1), A) // (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1), A)
@ -6879,7 +6879,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
// shift-left and shift-right-arith. // shift-left and shift-right-arith.
if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND && if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
N0->getValueType(0) == VT && N0->getValueType(0) == VT &&
N1C && N1C->isNullValue() && N1C && N1C->isNullValue() &&
N2C && N2C->isNullValue()) { N2C && N2C->isNullValue()) {
SDValue AndLHS = N0->getOperand(0); SDValue AndLHS = N0->getOperand(0);
ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1)); ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
@ -6889,13 +6889,13 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
SDValue ShlAmt = SDValue ShlAmt =
DAG.getConstant(AndMask.countLeadingZeros(), getShiftAmountTy()); DAG.getConstant(AndMask.countLeadingZeros(), getShiftAmountTy());
SDValue Shl = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, AndLHS, ShlAmt); SDValue Shl = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, AndLHS, ShlAmt);
// Now arithmetic right shift it all the way over, so the result is either // Now arithmetic right shift it all the way over, so the result is either
// all-ones, or zero. // all-ones, or zero.
SDValue ShrAmt = SDValue ShrAmt =
DAG.getConstant(AndMask.getBitWidth()-1, getShiftAmountTy()); DAG.getConstant(AndMask.getBitWidth()-1, getShiftAmountTy());
SDValue Shr = DAG.getNode(ISD::SRA, N0.getDebugLoc(), VT, Shl, ShrAmt); SDValue Shr = DAG.getNode(ISD::SRA, N0.getDebugLoc(), VT, Shl, ShrAmt);
return DAG.getNode(ISD::AND, DL, VT, Shr, N3); return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
} }
} }
@ -7066,7 +7066,7 @@ static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
Offset += C->getZExtValue(); Offset += C->getZExtValue();
} }
} }
// Return the underlying GlobalValue, and update the Offset. Return false // Return the underlying GlobalValue, and update the Offset. Return false
// for GlobalAddressSDNode since the same GlobalAddress may be represented // for GlobalAddressSDNode since the same GlobalAddress may be represented
// by multiple nodes with different offsets. // by multiple nodes with different offsets.
@ -7125,7 +7125,7 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1); return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
} }
// Otherwise, if we know what the bases are, and they aren't identical, then // Otherwise, if we know what the bases are, and they aren't identical, then
// we know they cannot alias. // we know they cannot alias.
if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2)) if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
return false; return false;
@ -7139,13 +7139,13 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
(Size1 == Size2) && (SrcValueAlign1 > Size1)) { (Size1 == Size2) && (SrcValueAlign1 > Size1)) {
int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1; int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1;
int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1; int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1;
// There is no overlap between these relatively aligned accesses of similar // There is no overlap between these relatively aligned accesses of similar
// size, return no alias. // size, return no alias.
if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1) if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1)
return false; return false;
} }
if (CombinerGlobalAA) { if (CombinerGlobalAA) {
// Use alias analysis information. // Use alias analysis information.
int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2); int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2);
@ -7166,7 +7166,7 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
/// node. Returns true if the operand was a load. /// node. Returns true if the operand was a load.
bool DAGCombiner::FindAliasInfo(SDNode *N, bool DAGCombiner::FindAliasInfo(SDNode *N,
SDValue &Ptr, int64_t &Size, SDValue &Ptr, int64_t &Size,
const Value *&SrcValue, const Value *&SrcValue,
int &SrcValueOffset, int &SrcValueOffset,
unsigned &SrcValueAlign, unsigned &SrcValueAlign,
const MDNode *&TBAAInfo) const { const MDNode *&TBAAInfo) const {
@ -7206,26 +7206,26 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
int SrcValueOffset; int SrcValueOffset;
unsigned SrcValueAlign; unsigned SrcValueAlign;
const MDNode *SrcTBAAInfo; const MDNode *SrcTBAAInfo;
bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset, bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset,
SrcValueAlign, SrcTBAAInfo); SrcValueAlign, SrcTBAAInfo);
// Starting off. // Starting off.
Chains.push_back(OriginalChain); Chains.push_back(OriginalChain);
unsigned Depth = 0; unsigned Depth = 0;
// Look at each chain and determine if it is an alias. If so, add it to the // Look at each chain and determine if it is an alias. If so, add it to the
// aliases list. If not, then continue up the chain looking for the next // aliases list. If not, then continue up the chain looking for the next
// candidate. // candidate.
while (!Chains.empty()) { while (!Chains.empty()) {
SDValue Chain = Chains.back(); SDValue Chain = Chains.back();
Chains.pop_back(); Chains.pop_back();
// For TokenFactor nodes, look at each operand and only continue up the // For TokenFactor nodes, look at each operand and only continue up the
// chain until we find two aliases. If we've seen two aliases, assume we'll // chain until we find two aliases. If we've seen two aliases, assume we'll
// find more and revert to original chain since the xform is unlikely to be // find more and revert to original chain since the xform is unlikely to be
// profitable. // profitable.
// //
// FIXME: The depth check could be made to return the last non-aliasing // FIXME: The depth check could be made to return the last non-aliasing
// chain we found before we hit a tokenfactor rather than the original // chain we found before we hit a tokenfactor rather than the original
// chain. // chain.
if (Depth > 6 || Aliases.size() == 2) { if (Depth > 6 || Aliases.size() == 2) {
@ -7309,9 +7309,9 @@ SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
// If a single operand then chain to it. We don't need to revisit it. // If a single operand then chain to it. We don't need to revisit it.
return Aliases[0]; return Aliases[0];
} }
// Construct a custom tailored token factor. // Construct a custom tailored token factor.
return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other, return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
&Aliases[0], Aliases.size()); &Aliases[0], Aliases.size());
} }

View File

@ -197,12 +197,12 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(TargetOpcode::IMPLICIT_DEF), Reg); TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
} }
// If target-independent code couldn't handle the value, give target-specific // If target-independent code couldn't handle the value, give target-specific
// code a try. // code a try.
if (!Reg && isa<Constant>(V)) if (!Reg && isa<Constant>(V))
Reg = TargetMaterializeConstant(cast<Constant>(V)); Reg = TargetMaterializeConstant(cast<Constant>(V));
// Don't cache constant materializations in the general ValueMap. // Don't cache constant materializations in the general ValueMap.
// To do so would require tracking what uses they dominate. // To do so would require tracking what uses they dominate.
if (Reg != 0) { if (Reg != 0) {
@ -234,7 +234,7 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
LocalValueMap[I] = Reg; LocalValueMap[I] = Reg;
return Reg; return Reg;
} }
unsigned &AssignedReg = FuncInfo.ValueMap[I]; unsigned &AssignedReg = FuncInfo.ValueMap[I];
if (AssignedReg == 0) if (AssignedReg == 0)
// Use the new register. // Use the new register.
@ -414,7 +414,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
// If this is a constant subscript, handle it quickly. // If this is a constant subscript, handle it quickly.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) { if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
if (CI->isZero()) continue; if (CI->isZero()) continue;
uint64_t Offs = uint64_t Offs =
TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue(); TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT); N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
if (N == 0) if (N == 0)
@ -423,7 +423,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
NIsKill = true; NIsKill = true;
continue; continue;
} }
// N = N + Idx * ElementSize; // N = N + Idx * ElementSize;
uint64_t ElementSize = TD.getTypeAllocSize(Ty); uint64_t ElementSize = TD.getTypeAllocSize(Ty);
std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx); std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
@ -479,13 +479,13 @@ bool FastISel::SelectCall(const User *I) {
Offset = FuncInfo.getByValArgumentFrameIndex(Arg); Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
if (Offset) if (Offset)
Reg = TRI.getFrameRegister(*FuncInfo.MF); Reg = TRI.getFrameRegister(*FuncInfo.MF);
} }
} }
if (!Reg) if (!Reg)
Reg = getRegForValue(Address); Reg = getRegForValue(Address);
if (Reg) if (Reg)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(TargetOpcode::DBG_VALUE)) TII.get(TargetOpcode::DBG_VALUE))
.addReg(Reg, RegState::Debug).addImm(Offset) .addReg(Reg, RegState::Debug).addImm(Offset)
.addMetadata(DI->getVariable()); .addMetadata(DI->getVariable());
@ -521,7 +521,7 @@ bool FastISel::SelectCall(const User *I) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
.addReg(0U).addImm(DI->getOffset()) .addReg(0U).addImm(DI->getOffset())
.addMetadata(DI->getVariable()); .addMetadata(DI->getVariable());
} }
return true; return true;
} }
case Intrinsic::eh_exception: { case Intrinsic::eh_exception: {
@ -594,12 +594,12 @@ bool FastISel::SelectCall(const User *I) {
bool FastISel::SelectCast(const User *I, unsigned Opcode) { bool FastISel::SelectCast(const User *I, unsigned Opcode) {
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType()); EVT DstVT = TLI.getValueType(I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() || if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
DstVT == MVT::Other || !DstVT.isSimple()) DstVT == MVT::Other || !DstVT.isSimple())
// Unhandled type. Halt "fast" selection and bail. // Unhandled type. Halt "fast" selection and bail.
return false; return false;
// Check if the destination type is legal. Or as a special case, // Check if the destination type is legal. Or as a special case,
// it may be i1 if we're doing a truncate because that's // it may be i1 if we're doing a truncate because that's
// easy and somewhat common. // easy and somewhat common.
@ -641,7 +641,7 @@ bool FastISel::SelectCast(const User *I, unsigned Opcode) {
InputReg, InputRegIsKill); InputReg, InputRegIsKill);
if (!ResultReg) if (!ResultReg)
return false; return false;
UpdateValueMap(I, ResultReg); UpdateValueMap(I, ResultReg);
return true; return true;
} }
@ -656,23 +656,23 @@ bool FastISel::SelectBitCast(const User *I) {
return true; return true;
} }
// Bitcasts of other values become reg-reg copies or BIT_CONVERT operators. // Bitcasts of other values become reg-reg copies or BITCAST operators.
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType()); EVT DstVT = TLI.getValueType(I->getType());
if (SrcVT == MVT::Other || !SrcVT.isSimple() || if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
DstVT == MVT::Other || !DstVT.isSimple() || DstVT == MVT::Other || !DstVT.isSimple() ||
!TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT)) !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
// Unhandled type. Halt "fast" selection and bail. // Unhandled type. Halt "fast" selection and bail.
return false; return false;
unsigned Op0 = getRegForValue(I->getOperand(0)); unsigned Op0 = getRegForValue(I->getOperand(0));
if (Op0 == 0) if (Op0 == 0)
// Unhandled operand. Halt "fast" selection and bail. // Unhandled operand. Halt "fast" selection and bail.
return false; return false;
bool Op0IsKill = hasTrivialKill(I->getOperand(0)); bool Op0IsKill = hasTrivialKill(I->getOperand(0));
// First, try to perform the bitcast by inserting a reg-reg copy. // First, try to perform the bitcast by inserting a reg-reg copy.
unsigned ResultReg = 0; unsigned ResultReg = 0;
if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) { if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
@ -685,15 +685,15 @@ bool FastISel::SelectBitCast(const User *I) {
ResultReg).addReg(Op0); ResultReg).addReg(Op0);
} }
} }
// If the reg-reg copy failed, select a BIT_CONVERT opcode. // If the reg-reg copy failed, select a BITCAST opcode.
if (!ResultReg) if (!ResultReg)
ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
ISD::BIT_CONVERT, Op0, Op0IsKill); ISD::BITCAST, Op0, Op0IsKill);
if (!ResultReg) if (!ResultReg)
return false; return false;
UpdateValueMap(I, ResultReg); UpdateValueMap(I, ResultReg);
return true; return true;
} }
@ -765,7 +765,7 @@ FastISel::SelectFNeg(const User *I) {
return false; return false;
unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(), unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
ISD::BIT_CONVERT, OpReg, OpRegIsKill); ISD::BITCAST, OpReg, OpRegIsKill);
if (IntReg == 0) if (IntReg == 0)
return false; return false;
@ -777,7 +777,7 @@ FastISel::SelectFNeg(const User *I) {
return false; return false;
ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
ISD::BIT_CONVERT, IntResultReg, /*Kill=*/true); ISD::BITCAST, IntResultReg, /*Kill=*/true);
if (ResultReg == 0) if (ResultReg == 0)
return false; return false;
@ -857,10 +857,10 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
// Dynamic-sized alloca is not handled yet. // Dynamic-sized alloca is not handled yet.
return false; return false;
case Instruction::Call: case Instruction::Call:
return SelectCall(I); return SelectCall(I);
case Instruction::BitCast: case Instruction::BitCast:
return SelectBitCast(I); return SelectBitCast(I);
@ -923,7 +923,7 @@ unsigned FastISel::FastEmit_r(MVT, MVT,
return 0; return 0;
} }
unsigned FastISel::FastEmit_rr(MVT, MVT, unsigned FastISel::FastEmit_rr(MVT, MVT,
unsigned, unsigned,
unsigned /*Op0*/, bool /*Op0IsKill*/, unsigned /*Op0*/, bool /*Op0IsKill*/,
unsigned /*Op1*/, bool /*Op1IsKill*/) { unsigned /*Op1*/, bool /*Op1IsKill*/) {
@ -1151,7 +1151,7 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
uint64_t Imm) { uint64_t Imm) {
unsigned ResultReg = createResultReg(RC); unsigned ResultReg = createResultReg(RC);
const TargetInstrDesc &II = TII.get(MachineInstOpcode); const TargetInstrDesc &II = TII.get(MachineInstOpcode);
if (II.getNumDefs() >= 1) if (II.getNumDefs() >= 1)
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
else { else {

View File

@ -403,7 +403,7 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
// Expand to a bitconvert of the value to the integer type of the // Expand to a bitconvert of the value to the integer type of the
// same size, then a (misaligned) int store. // same size, then a (misaligned) int store.
// FIXME: Does not handle truncating floating point stores! // FIXME: Does not handle truncating floating point stores!
SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, intVT, Val); SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
ST->isVolatile(), ST->isNonTemporal(), Alignment); ST->isVolatile(), ST->isNonTemporal(), Alignment);
} else { } else {
@ -515,14 +515,14 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(), SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
LD->isVolatile(), LD->isVolatile(),
LD->isNonTemporal(), LD->getAlignment()); LD->isNonTemporal(), LD->getAlignment());
SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, LoadedVT, newLoad); SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
if (VT.isFloatingPoint() && LoadedVT != VT) if (VT.isFloatingPoint() && LoadedVT != VT)
Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result); Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);
SDValue Ops[] = { Result, Chain }; SDValue Ops[] = { Result, Chain };
return DAG.getMergeValues(Ops, 2, dl); return DAG.getMergeValues(Ops, 2, dl);
} }
// Copy the value to a (aligned) stack slot using (unaligned) integer // Copy the value to a (aligned) stack slot using (unaligned) integer
// loads and stores, then do a (aligned) load from the stack slot. // loads and stores, then do a (aligned) load from the stack slot.
EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT); EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
@ -733,7 +733,7 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
isVolatile, isNonTemporal, Alignment); isVolatile, isNonTemporal, Alignment);
} }
if (CFP->getValueType(0) == MVT::f64) { if (CFP->getValueType(0) == MVT::f64) {
// If this target supports 64-bit registers, do a single 64-bit store. // If this target supports 64-bit registers, do a single 64-bit store.
if (getTypeAction(MVT::i64) == Legal) { if (getTypeAction(MVT::i64) == Legal) {
@ -742,7 +742,7 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(), return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
isVolatile, isNonTemporal, Alignment); isVolatile, isNonTemporal, Alignment);
} }
if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) { if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
// Otherwise, if the target supports 32-bit registers, use 2 32-bit // Otherwise, if the target supports 32-bit registers, use 2 32-bit
// stores. If the target supports neither 32- nor 64-bits, this // stores. If the target supports neither 32- nor 64-bits, this
@ -1145,7 +1145,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(), Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(),
LD->isVolatile(), LD->isNonTemporal(), LD->isVolatile(), LD->isNonTemporal(),
LD->getAlignment()); LD->getAlignment());
Tmp3 = LegalizeOp(DAG.getNode(ISD::BIT_CONVERT, dl, VT, Tmp1)); Tmp3 = LegalizeOp(DAG.getNode(ISD::BITCAST, dl, VT, Tmp1));
Tmp4 = LegalizeOp(Tmp1.getValue(1)); Tmp4 = LegalizeOp(Tmp1.getValue(1));
break; break;
} }
@ -1156,7 +1156,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
AddLegalizedOperand(SDValue(Node, 1), Tmp4); AddLegalizedOperand(SDValue(Node, 1), Tmp4);
return Op.getResNo() ? Tmp4 : Tmp3; return Op.getResNo() ? Tmp4 : Tmp3;
} }
EVT SrcVT = LD->getMemoryVT(); EVT SrcVT = LD->getMemoryVT();
unsigned SrcWidth = SrcVT.getSizeInBits(); unsigned SrcWidth = SrcVT.getSizeInBits();
unsigned Alignment = LD->getAlignment(); unsigned Alignment = LD->getAlignment();
@ -1410,7 +1410,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
break; break;
case TargetLowering::Promote: case TargetLowering::Promote:
assert(VT.isVector() && "Unknown legal promote case!"); assert(VT.isVector() && "Unknown legal promote case!");
Tmp3 = DAG.getNode(ISD::BIT_CONVERT, dl, Tmp3 = DAG.getNode(ISD::BITCAST, dl,
TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3); TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2, Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
ST->getPointerInfo(), isVolatile, ST->getPointerInfo(), isVolatile,
@ -1629,7 +1629,7 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits()); EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits());
if (isTypeLegal(IVT)) { if (isTypeLegal(IVT)) {
// Convert to an integer with the same sign bit. // Convert to an integer with the same sign bit.
SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2); SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2);
} else { } else {
// Store the float to memory, then load the sign part out as an integer. // Store the float to memory, then load the sign part out as an integer.
MVT LoadTy = TLI.getPointerTy(); MVT LoadTy = TLI.getPointerTy();
@ -2120,8 +2120,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
DAG.getConstant(32, MVT::i64)); DAG.getConstant(32, MVT::i64));
SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52); SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52);
SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84); SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84);
SDValue LoFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, LoOr); SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr);
SDValue HiFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, HiOr); SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr);
SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt, SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt,
TwoP84PlusTwoP52); TwoP84PlusTwoP52);
return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub); return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub);
@ -2134,28 +2134,28 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
// algorithm from the x86_64 __floatundidf in compiler_rt. // algorithm from the x86_64 __floatundidf in compiler_rt.
if (!isSigned) { if (!isSigned) {
SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0); SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0);
SDValue ShiftConst = DAG.getConstant(1, TLI.getShiftAmountTy()); SDValue ShiftConst = DAG.getConstant(1, TLI.getShiftAmountTy());
SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst); SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst);
SDValue AndConst = DAG.getConstant(1, MVT::i64); SDValue AndConst = DAG.getConstant(1, MVT::i64);
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst); SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst);
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr); SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr);
SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or); SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or);
SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt); SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt);
// TODO: This really should be implemented using a branch rather than a // TODO: This really should be implemented using a branch rather than a
// select. We happen to get lucky and machinesink does the right // select. We happen to get lucky and machinesink does the right
// thing most of the time. This would be a good candidate for a // thing most of the time. This would be a good candidate for a
//pseudo-op, or, even better, for whole-function isel. //pseudo-op, or, even better, for whole-function isel.
SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64), SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT); Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT);
return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast); return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast);
} }
// Otherwise, implement the fully general conversion. // Otherwise, implement the fully general conversion.
EVT SHVT = TLI.getShiftAmountTy(); EVT SHVT = TLI.getShiftAmountTy();
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64)); DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64));
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And,
@ -2169,7 +2169,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64), Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64),
ISD::SETUGE); ISD::SETUGE);
SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0); SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0);
SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2, SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
DAG.getConstant(32, SHVT)); DAG.getConstant(32, SHVT));
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh); SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh);
@ -2617,7 +2617,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
break; break;
} }
case ISD::FP_ROUND: case ISD::FP_ROUND:
case ISD::BIT_CONVERT: case ISD::BITCAST:
Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0), Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
Node->getValueType(0), dl); Node->getValueType(0), dl);
Results.push_back(Tmp1); Results.push_back(Tmp1);
@ -2739,7 +2739,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
case ISD::EXTRACT_VECTOR_ELT: case ISD::EXTRACT_VECTOR_ELT:
if (Node->getOperand(0).getValueType().getVectorNumElements() == 1) if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
// This must be an access of the only element. Return it. // This must be an access of the only element. Return it.
Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0), Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
Node->getOperand(0)); Node->getOperand(0));
else else
Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0)); Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
@ -3361,8 +3361,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
case ISD::XOR: { case ISD::XOR: {
unsigned ExtOp, TruncOp; unsigned ExtOp, TruncOp;
if (OVT.isVector()) { if (OVT.isVector()) {
ExtOp = ISD::BIT_CONVERT; ExtOp = ISD::BITCAST;
TruncOp = ISD::BIT_CONVERT; TruncOp = ISD::BITCAST;
} else { } else {
assert(OVT.isInteger() && "Cannot promote logic operation"); assert(OVT.isInteger() && "Cannot promote logic operation");
ExtOp = ISD::ANY_EXTEND; ExtOp = ISD::ANY_EXTEND;
@ -3379,8 +3379,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
case ISD::SELECT: { case ISD::SELECT: {
unsigned ExtOp, TruncOp; unsigned ExtOp, TruncOp;
if (Node->getValueType(0).isVector()) { if (Node->getValueType(0).isVector()) {
ExtOp = ISD::BIT_CONVERT; ExtOp = ISD::BITCAST;
TruncOp = ISD::BIT_CONVERT; TruncOp = ISD::BITCAST;
} else if (Node->getValueType(0).isInteger()) { } else if (Node->getValueType(0).isInteger()) {
ExtOp = ISD::ANY_EXTEND; ExtOp = ISD::ANY_EXTEND;
TruncOp = ISD::TRUNCATE; TruncOp = ISD::TRUNCATE;
@ -3407,12 +3407,12 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
cast<ShuffleVectorSDNode>(Node)->getMask(Mask); cast<ShuffleVectorSDNode>(Node)->getMask(Mask);
// Cast the two input vectors. // Cast the two input vectors.
Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(0)); Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(1)); Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));
// Convert the shuffle mask to the right # elements. // Convert the shuffle mask to the right # elements.
Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask); Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, OVT, Tmp1); Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
Results.push_back(Tmp1); Results.push_back(Tmp1);
break; break;
} }

View File

@ -55,7 +55,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
#endif #endif
llvm_unreachable("Do not know how to soften the result of this operator!"); llvm_unreachable("Do not know how to soften the result of this operator!");
case ISD::BIT_CONVERT: R = SoftenFloatRes_BIT_CONVERT(N); break; case ISD::BITCAST: R = SoftenFloatRes_BITCAST(N); break;
case ISD::BUILD_PAIR: R = SoftenFloatRes_BUILD_PAIR(N); break; case ISD::BUILD_PAIR: R = SoftenFloatRes_BUILD_PAIR(N); break;
case ISD::ConstantFP: case ISD::ConstantFP:
R = SoftenFloatRes_ConstantFP(cast<ConstantFPSDNode>(N)); R = SoftenFloatRes_ConstantFP(cast<ConstantFPSDNode>(N));
@ -102,7 +102,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
SetSoftenedFloat(SDValue(N, ResNo), R); SetSoftenedFloat(SDValue(N, ResNo), R);
} }
SDValue DAGTypeLegalizer::SoftenFloatRes_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::SoftenFloatRes_BITCAST(SDNode *N) {
return BitConvertToInteger(N->getOperand(0)); return BitConvertToInteger(N->getOperand(0));
} }
@ -557,7 +557,7 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
#endif #endif
llvm_unreachable("Do not know how to soften this operator's operand!"); llvm_unreachable("Do not know how to soften this operator's operand!");
case ISD::BIT_CONVERT: Res = SoftenFloatOp_BIT_CONVERT(N); break; case ISD::BITCAST: Res = SoftenFloatOp_BITCAST(N); break;
case ISD::BR_CC: Res = SoftenFloatOp_BR_CC(N); break; case ISD::BR_CC: Res = SoftenFloatOp_BR_CC(N); break;
case ISD::FP_ROUND: Res = SoftenFloatOp_FP_ROUND(N); break; case ISD::FP_ROUND: Res = SoftenFloatOp_FP_ROUND(N); break;
case ISD::FP_TO_SINT: Res = SoftenFloatOp_FP_TO_SINT(N); break; case ISD::FP_TO_SINT: Res = SoftenFloatOp_FP_TO_SINT(N); break;
@ -669,8 +669,8 @@ void DAGTypeLegalizer::SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
} }
} }
SDValue DAGTypeLegalizer::SoftenFloatOp_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::SoftenFloatOp_BITCAST(SDNode *N) {
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), N->getValueType(0), return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getValueType(0),
GetSoftenedFloat(N->getOperand(0))); GetSoftenedFloat(N->getOperand(0)));
} }
@ -815,7 +815,7 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break; case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break; case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::BIT_CONVERT: ExpandRes_BIT_CONVERT(N, Lo, Hi); break; case ISD::BITCAST: ExpandRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break; case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break; case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break; case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
@ -1220,7 +1220,7 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
#endif #endif
llvm_unreachable("Do not know how to expand this operator's operand!"); llvm_unreachable("Do not know how to expand this operator's operand!");
case ISD::BIT_CONVERT: Res = ExpandOp_BIT_CONVERT(N); break; case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break; case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break; case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;

View File

@ -49,7 +49,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
llvm_unreachable("Do not know how to promote this operator!"); llvm_unreachable("Do not know how to promote this operator!");
case ISD::AssertSext: Res = PromoteIntRes_AssertSext(N); break; case ISD::AssertSext: Res = PromoteIntRes_AssertSext(N); break;
case ISD::AssertZext: Res = PromoteIntRes_AssertZext(N); break; case ISD::AssertZext: Res = PromoteIntRes_AssertZext(N); break;
case ISD::BIT_CONVERT: Res = PromoteIntRes_BIT_CONVERT(N); break; case ISD::BITCAST: Res = PromoteIntRes_BITCAST(N); break;
case ISD::BSWAP: Res = PromoteIntRes_BSWAP(N); break; case ISD::BSWAP: Res = PromoteIntRes_BSWAP(N); break;
case ISD::BUILD_PAIR: Res = PromoteIntRes_BUILD_PAIR(N); break; case ISD::BUILD_PAIR: Res = PromoteIntRes_BUILD_PAIR(N); break;
case ISD::Constant: Res = PromoteIntRes_Constant(N); break; case ISD::Constant: Res = PromoteIntRes_Constant(N); break;
@ -162,7 +162,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Atomic2(AtomicSDNode *N) {
return Res; return Res;
} }
SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) {
SDValue InOp = N->getOperand(0); SDValue InOp = N->getOperand(0);
EVT InVT = InOp.getValueType(); EVT InVT = InOp.getValueType();
EVT NInVT = TLI.getTypeToTransformTo(*DAG.getContext(), InVT); EVT NInVT = TLI.getTypeToTransformTo(*DAG.getContext(), InVT);
@ -179,8 +179,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
case PromoteInteger: case PromoteInteger:
if (NOutVT.bitsEq(NInVT)) if (NOutVT.bitsEq(NInVT))
// The input promotes to the same size. Convert the promoted value. // The input promotes to the same size. Convert the promoted value.
return DAG.getNode(ISD::BIT_CONVERT, dl, return DAG.getNode(ISD::BITCAST, dl, NOutVT, GetPromotedInteger(InOp));
NOutVT, GetPromotedInteger(InOp));
break; break;
case SoftenFloat: case SoftenFloat:
// Promote the integer operand by hand. // Promote the integer operand by hand.
@ -193,7 +192,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT, return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
BitConvertToInteger(GetScalarizedVector(InOp))); BitConvertToInteger(GetScalarizedVector(InOp)));
case SplitVector: { case SplitVector: {
// For example, i32 = BIT_CONVERT v2i16 on alpha. Convert the split // For example, i32 = BITCAST v2i16 on alpha. Convert the split
// pieces of the input into integers and reassemble in the final type. // pieces of the input into integers and reassemble in the final type.
SDValue Lo, Hi; SDValue Lo, Hi;
GetSplitVector(N->getOperand(0), Lo, Hi); GetSplitVector(N->getOperand(0), Lo, Hi);
@ -207,12 +206,12 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
EVT::getIntegerVT(*DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(),
NOutVT.getSizeInBits()), NOutVT.getSizeInBits()),
JoinIntegers(Lo, Hi)); JoinIntegers(Lo, Hi));
return DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, InOp); return DAG.getNode(ISD::BITCAST, dl, NOutVT, InOp);
} }
case WidenVector: case WidenVector:
if (OutVT.bitsEq(NInVT)) if (OutVT.bitsEq(NInVT))
// The input is widened to the same size. Convert to the widened value. // The input is widened to the same size. Convert to the widened value.
return DAG.getNode(ISD::BIT_CONVERT, dl, OutVT, GetWidenedVector(InOp)); return DAG.getNode(ISD::BITCAST, dl, OutVT, GetWidenedVector(InOp));
} }
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT, return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
@ -631,7 +630,7 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
llvm_unreachable("Do not know how to promote this operator's operand!"); llvm_unreachable("Do not know how to promote this operator's operand!");
case ISD::ANY_EXTEND: Res = PromoteIntOp_ANY_EXTEND(N); break; case ISD::ANY_EXTEND: Res = PromoteIntOp_ANY_EXTEND(N); break;
case ISD::BIT_CONVERT: Res = PromoteIntOp_BIT_CONVERT(N); break; case ISD::BITCAST: Res = PromoteIntOp_BITCAST(N); break;
case ISD::BR_CC: Res = PromoteIntOp_BR_CC(N, OpNo); break; case ISD::BR_CC: Res = PromoteIntOp_BR_CC(N, OpNo); break;
case ISD::BRCOND: Res = PromoteIntOp_BRCOND(N, OpNo); break; case ISD::BRCOND: Res = PromoteIntOp_BRCOND(N, OpNo); break;
case ISD::BUILD_PAIR: Res = PromoteIntOp_BUILD_PAIR(N); break; case ISD::BUILD_PAIR: Res = PromoteIntOp_BUILD_PAIR(N); break;
@ -713,7 +712,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_ANY_EXTEND(SDNode *N) {
return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), N->getValueType(0), Op); return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), N->getValueType(0), Op);
} }
SDValue DAGTypeLegalizer::PromoteIntOp_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::PromoteIntOp_BITCAST(SDNode *N) {
// This should only occur in unusual situations like bitcasting to an // This should only occur in unusual situations like bitcasting to an
// x86_fp80, so just turn it into a store+load // x86_fp80, so just turn it into a store+load
return CreateStackStoreLoad(N->getOperand(0), N->getValueType(0)); return CreateStackStoreLoad(N->getOperand(0), N->getValueType(0));
@ -950,7 +949,7 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break; case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break; case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
case ISD::BIT_CONVERT: ExpandRes_BIT_CONVERT(N, Lo, Hi); break; case ISD::BITCAST: ExpandRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break; case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break; case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break; case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
@ -2076,7 +2075,7 @@ bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
#endif #endif
llvm_unreachable("Do not know how to expand this operator's operand!"); llvm_unreachable("Do not know how to expand this operator's operand!");
case ISD::BIT_CONVERT: Res = ExpandOp_BIT_CONVERT(N); break; case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
case ISD::BR_CC: Res = ExpandIntOp_BR_CC(N); break; case ISD::BR_CC: Res = ExpandIntOp_BR_CC(N); break;
case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break; case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break; case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
@ -2320,7 +2319,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
N->getMemoryVT(), isVolatile, isNonTemporal, N->getMemoryVT(), isVolatile, isNonTemporal,
Alignment); Alignment);
} }
if (TLI.isLittleEndian()) { if (TLI.isLittleEndian()) {
// Little-endian - low bits are at low addresses. // Little-endian - low bits are at low addresses.
GetExpandedInteger(N->getValue(), Lo, Hi); GetExpandedInteger(N->getValue(), Lo, Hi);

View File

@ -858,7 +858,7 @@ void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {
/// BitConvertToInteger - Convert to an integer of the same size. /// BitConvertToInteger - Convert to an integer of the same size.
SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) { SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) {
unsigned BitWidth = Op.getValueType().getSizeInBits(); unsigned BitWidth = Op.getValueType().getSizeInBits();
return DAG.getNode(ISD::BIT_CONVERT, Op.getDebugLoc(), return DAG.getNode(ISD::BITCAST, Op.getDebugLoc(),
EVT::getIntegerVT(*DAG.getContext(), BitWidth), Op); EVT::getIntegerVT(*DAG.getContext(), BitWidth), Op);
} }
@ -869,7 +869,7 @@ SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) {
unsigned EltWidth = Op.getValueType().getVectorElementType().getSizeInBits(); unsigned EltWidth = Op.getValueType().getVectorElementType().getSizeInBits();
EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth); EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
unsigned NumElts = Op.getValueType().getVectorNumElements(); unsigned NumElts = Op.getValueType().getVectorNumElements();
return DAG.getNode(ISD::BIT_CONVERT, Op.getDebugLoc(), return DAG.getNode(ISD::BITCAST, Op.getDebugLoc(),
EVT::getVectorVT(*DAG.getContext(), EltNVT, NumElts), Op); EVT::getVectorVT(*DAG.getContext(), EltNVT, NumElts), Op);
} }

View File

@ -99,7 +99,7 @@ private:
return SoftenFloat; return SoftenFloat;
return ExpandFloat; return ExpandFloat;
} }
if (VT.getVectorNumElements() == 1) if (VT.getVectorNumElements() == 1)
return ScalarizeVector; return ScalarizeVector;
return SplitVector; return SplitVector;
@ -244,7 +244,7 @@ private:
SDValue PromoteIntRes_AssertZext(SDNode *N); SDValue PromoteIntRes_AssertZext(SDNode *N);
SDValue PromoteIntRes_Atomic1(AtomicSDNode *N); SDValue PromoteIntRes_Atomic1(AtomicSDNode *N);
SDValue PromoteIntRes_Atomic2(AtomicSDNode *N); SDValue PromoteIntRes_Atomic2(AtomicSDNode *N);
SDValue PromoteIntRes_BIT_CONVERT(SDNode *N); SDValue PromoteIntRes_BITCAST(SDNode *N);
SDValue PromoteIntRes_BSWAP(SDNode *N); SDValue PromoteIntRes_BSWAP(SDNode *N);
SDValue PromoteIntRes_BUILD_PAIR(SDNode *N); SDValue PromoteIntRes_BUILD_PAIR(SDNode *N);
SDValue PromoteIntRes_Constant(SDNode *N); SDValue PromoteIntRes_Constant(SDNode *N);
@ -278,7 +278,7 @@ private:
// Integer Operand Promotion. // Integer Operand Promotion.
bool PromoteIntegerOperand(SDNode *N, unsigned OperandNo); bool PromoteIntegerOperand(SDNode *N, unsigned OperandNo);
SDValue PromoteIntOp_ANY_EXTEND(SDNode *N); SDValue PromoteIntOp_ANY_EXTEND(SDNode *N);
SDValue PromoteIntOp_BIT_CONVERT(SDNode *N); SDValue PromoteIntOp_BITCAST(SDNode *N);
SDValue PromoteIntOp_BUILD_PAIR(SDNode *N); SDValue PromoteIntOp_BUILD_PAIR(SDNode *N);
SDValue PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo); SDValue PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo);
SDValue PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo); SDValue PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo);
@ -352,7 +352,7 @@ private:
// Integer Operand Expansion. // Integer Operand Expansion.
bool ExpandIntegerOperand(SDNode *N, unsigned OperandNo); bool ExpandIntegerOperand(SDNode *N, unsigned OperandNo);
SDValue ExpandIntOp_BIT_CONVERT(SDNode *N); SDValue ExpandIntOp_BITCAST(SDNode *N);
SDValue ExpandIntOp_BR_CC(SDNode *N); SDValue ExpandIntOp_BR_CC(SDNode *N);
SDValue ExpandIntOp_BUILD_VECTOR(SDNode *N); SDValue ExpandIntOp_BUILD_VECTOR(SDNode *N);
SDValue ExpandIntOp_EXTRACT_ELEMENT(SDNode *N); SDValue ExpandIntOp_EXTRACT_ELEMENT(SDNode *N);
@ -387,7 +387,7 @@ private:
// Result Float to Integer Conversion. // Result Float to Integer Conversion.
void SoftenFloatResult(SDNode *N, unsigned OpNo); void SoftenFloatResult(SDNode *N, unsigned OpNo);
SDValue SoftenFloatRes_BIT_CONVERT(SDNode *N); SDValue SoftenFloatRes_BITCAST(SDNode *N);
SDValue SoftenFloatRes_BUILD_PAIR(SDNode *N); SDValue SoftenFloatRes_BUILD_PAIR(SDNode *N);
SDValue SoftenFloatRes_ConstantFP(ConstantFPSDNode *N); SDValue SoftenFloatRes_ConstantFP(ConstantFPSDNode *N);
SDValue SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N); SDValue SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N);
@ -426,7 +426,7 @@ private:
// Operand Float to Integer Conversion. // Operand Float to Integer Conversion.
bool SoftenFloatOperand(SDNode *N, unsigned OpNo); bool SoftenFloatOperand(SDNode *N, unsigned OpNo);
SDValue SoftenFloatOp_BIT_CONVERT(SDNode *N); SDValue SoftenFloatOp_BITCAST(SDNode *N);
SDValue SoftenFloatOp_BR_CC(SDNode *N); SDValue SoftenFloatOp_BR_CC(SDNode *N);
SDValue SoftenFloatOp_FP_ROUND(SDNode *N); SDValue SoftenFloatOp_FP_ROUND(SDNode *N);
SDValue SoftenFloatOp_FP_TO_SINT(SDNode *N); SDValue SoftenFloatOp_FP_TO_SINT(SDNode *N);
@ -515,7 +515,7 @@ private:
SDValue ScalarizeVecRes_UnaryOp(SDNode *N); SDValue ScalarizeVecRes_UnaryOp(SDNode *N);
SDValue ScalarizeVecRes_InregOp(SDNode *N); SDValue ScalarizeVecRes_InregOp(SDNode *N);
SDValue ScalarizeVecRes_BIT_CONVERT(SDNode *N); SDValue ScalarizeVecRes_BITCAST(SDNode *N);
SDValue ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N); SDValue ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N);
SDValue ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N); SDValue ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N);
SDValue ScalarizeVecRes_FPOWI(SDNode *N); SDValue ScalarizeVecRes_FPOWI(SDNode *N);
@ -532,7 +532,7 @@ private:
// Vector Operand Scalarization: <1 x ty> -> ty. // Vector Operand Scalarization: <1 x ty> -> ty.
bool ScalarizeVectorOperand(SDNode *N, unsigned OpNo); bool ScalarizeVectorOperand(SDNode *N, unsigned OpNo);
SDValue ScalarizeVecOp_BIT_CONVERT(SDNode *N); SDValue ScalarizeVecOp_BITCAST(SDNode *N);
SDValue ScalarizeVecOp_CONCAT_VECTORS(SDNode *N); SDValue ScalarizeVecOp_CONCAT_VECTORS(SDNode *N);
SDValue ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N); SDValue ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo); SDValue ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo);
@ -557,7 +557,7 @@ private:
void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi); void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_InregOp(SDNode *N, SDValue &Lo, SDValue &Hi); void SplitVecRes_InregOp(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo, SDValue &Hi); void SplitVecRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_BUILD_PAIR(SDNode *N, SDValue &Lo, SDValue &Hi); void SplitVecRes_BUILD_PAIR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi); void SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
void SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo, SDValue &Hi); void SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo, SDValue &Hi);
@ -577,7 +577,7 @@ private:
bool SplitVectorOperand(SDNode *N, unsigned OpNo); bool SplitVectorOperand(SDNode *N, unsigned OpNo);
SDValue SplitVecOp_UnaryOp(SDNode *N); SDValue SplitVecOp_UnaryOp(SDNode *N);
SDValue SplitVecOp_BIT_CONVERT(SDNode *N); SDValue SplitVecOp_BITCAST(SDNode *N);
SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N); SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N);
SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N); SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo); SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo);
@ -603,7 +603,7 @@ private:
// Widen Vector Result Promotion. // Widen Vector Result Promotion.
void WidenVectorResult(SDNode *N, unsigned ResNo); void WidenVectorResult(SDNode *N, unsigned ResNo);
SDValue WidenVecRes_BIT_CONVERT(SDNode* N); SDValue WidenVecRes_BITCAST(SDNode* N);
SDValue WidenVecRes_BUILD_VECTOR(SDNode* N); SDValue WidenVecRes_BUILD_VECTOR(SDNode* N);
SDValue WidenVecRes_CONCAT_VECTORS(SDNode* N); SDValue WidenVecRes_CONCAT_VECTORS(SDNode* N);
SDValue WidenVecRes_CONVERT_RNDSAT(SDNode* N); SDValue WidenVecRes_CONVERT_RNDSAT(SDNode* N);
@ -628,7 +628,7 @@ private:
// Widen Vector Operand. // Widen Vector Operand.
bool WidenVectorOperand(SDNode *N, unsigned ResNo); bool WidenVectorOperand(SDNode *N, unsigned ResNo);
SDValue WidenVecOp_BIT_CONVERT(SDNode *N); SDValue WidenVecOp_BITCAST(SDNode *N);
SDValue WidenVecOp_CONCAT_VECTORS(SDNode *N); SDValue WidenVecOp_CONCAT_VECTORS(SDNode *N);
SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N); SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
SDValue WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N); SDValue WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N);
@ -721,7 +721,7 @@ private:
} }
// Generic Result Expansion. // Generic Result Expansion.
void ExpandRes_BIT_CONVERT (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandRes_BITCAST (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_BUILD_PAIR (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandRes_BUILD_PAIR (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_EXTRACT_ELEMENT (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandRes_EXTRACT_ELEMENT (SDNode *N, SDValue &Lo, SDValue &Hi);
void ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi);
@ -729,7 +729,7 @@ private:
void ExpandRes_VAARG (SDNode *N, SDValue &Lo, SDValue &Hi); void ExpandRes_VAARG (SDNode *N, SDValue &Lo, SDValue &Hi);
// Generic Operand Expansion. // Generic Operand Expansion.
SDValue ExpandOp_BIT_CONVERT (SDNode *N); SDValue ExpandOp_BITCAST (SDNode *N);
SDValue ExpandOp_BUILD_VECTOR (SDNode *N); SDValue ExpandOp_BUILD_VECTOR (SDNode *N);
SDValue ExpandOp_EXTRACT_ELEMENT (SDNode *N); SDValue ExpandOp_EXTRACT_ELEMENT (SDNode *N);
SDValue ExpandOp_INSERT_VECTOR_ELT(SDNode *N); SDValue ExpandOp_INSERT_VECTOR_ELT(SDNode *N);

View File

@ -32,8 +32,7 @@ using namespace llvm;
// little/big-endian machines, followed by the Hi/Lo part. This means that // little/big-endian machines, followed by the Hi/Lo part. This means that
// they cannot be used as is on vectors, for which Lo is always stored first. // they cannot be used as is on vectors, for which Lo is always stored first.
void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo, void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
SDValue &Hi) {
EVT OutVT = N->getValueType(0); EVT OutVT = N->getValueType(0);
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT); EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
SDValue InOp = N->getOperand(0); SDValue InOp = N->getOperand(0);
@ -50,31 +49,31 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
case SoftenFloat: case SoftenFloat:
// Convert the integer operand instead. // Convert the integer operand instead.
SplitInteger(GetSoftenedFloat(InOp), Lo, Hi); SplitInteger(GetSoftenedFloat(InOp), Lo, Hi);
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo); Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi); Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return; return;
case ExpandInteger: case ExpandInteger:
case ExpandFloat: case ExpandFloat:
// Convert the expanded pieces of the input. // Convert the expanded pieces of the input.
GetExpandedOp(InOp, Lo, Hi); GetExpandedOp(InOp, Lo, Hi);
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo); Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi); Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return; return;
case SplitVector: case SplitVector:
GetSplitVector(InOp, Lo, Hi); GetSplitVector(InOp, Lo, Hi);
if (TLI.isBigEndian()) if (TLI.isBigEndian())
std::swap(Lo, Hi); std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo); Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi); Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return; return;
case ScalarizeVector: case ScalarizeVector:
// Convert the element instead. // Convert the element instead.
SplitInteger(BitConvertToInteger(GetScalarizedVector(InOp)), Lo, Hi); SplitInteger(BitConvertToInteger(GetScalarizedVector(InOp)), Lo, Hi);
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo); Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi); Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return; return;
case WidenVector: { case WidenVector: {
assert(!(InVT.getVectorNumElements() & 1) && "Unsupported BIT_CONVERT"); assert(!(InVT.getVectorNumElements() & 1) && "Unsupported BITCAST");
InOp = GetWidenedVector(InOp); InOp = GetWidenedVector(InOp);
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(), EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
InVT.getVectorNumElements()/2); InVT.getVectorNumElements()/2);
@ -84,19 +83,19 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
DAG.getIntPtrConstant(InNVT.getVectorNumElements())); DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
if (TLI.isBigEndian()) if (TLI.isBigEndian())
std::swap(Lo, Hi); std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo); Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi); Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
return; return;
} }
} }
if (InVT.isVector() && OutVT.isInteger()) { if (InVT.isVector() && OutVT.isInteger()) {
// Handle cases like i64 = BIT_CONVERT v1i64 on x86, where the operand // Handle cases like i64 = BITCAST v1i64 on x86, where the operand
// is legal but the result is not. // is legal but the result is not.
EVT NVT = EVT::getVectorVT(*DAG.getContext(), NOutVT, 2); EVT NVT = EVT::getVectorVT(*DAG.getContext(), NOutVT, 2);
if (isTypeLegal(NVT)) { if (isTypeLegal(NVT)) {
SDValue CastInOp = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, InOp); SDValue CastInOp = DAG.getNode(ISD::BITCAST, dl, NVT, InOp);
Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp, Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
DAG.getIntPtrConstant(0)); DAG.getIntPtrConstant(0));
Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp, Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
@ -173,7 +172,7 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
EVT OldVT = N->getValueType(0); EVT OldVT = N->getValueType(0);
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT); EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);
SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl, SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
EVT::getVectorVT(*DAG.getContext(), EVT::getVectorVT(*DAG.getContext(),
NewVT, 2*OldElts), NewVT, 2*OldElts),
OldVec); OldVec);
@ -262,14 +261,14 @@ void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Generic Operand Expansion. // Generic Operand Expansion.
//===--------------------------------------------------------------------===// //===--------------------------------------------------------------------===//
SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::ExpandOp_BITCAST(SDNode *N) {
DebugLoc dl = N->getDebugLoc(); DebugLoc dl = N->getDebugLoc();
if (N->getValueType(0).isVector()) { if (N->getValueType(0).isVector()) {
// An illegal expanding type is being converted to a legal vector type. // An illegal expanding type is being converted to a legal vector type.
// Make a two element vector out of the expanded parts and convert that // Make a two element vector out of the expanded parts and convert that
// instead, but only if the new vector type is legal (otherwise there // instead, but only if the new vector type is legal (otherwise there
// is no point, and it might create expansion loops). For example, on // is no point, and it might create expansion loops). For example, on
// x86 this turns v1i64 = BIT_CONVERT i64 into v1i64 = BIT_CONVERT v2i32. // x86 this turns v1i64 = BITCAST i64 into v1i64 = BITCAST v2i32.
EVT OVT = N->getOperand(0).getValueType(); EVT OVT = N->getOperand(0).getValueType();
EVT NVT = EVT::getVectorVT(*DAG.getContext(), EVT NVT = EVT::getVectorVT(*DAG.getContext(),
TLI.getTypeToTransformTo(*DAG.getContext(), OVT), TLI.getTypeToTransformTo(*DAG.getContext(), OVT),
@ -283,7 +282,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
std::swap(Parts[0], Parts[1]); std::swap(Parts[0], Parts[1]);
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Parts, 2); SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Parts, 2);
return DAG.getNode(ISD::BIT_CONVERT, dl, N->getValueType(0), Vec); return DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), Vec);
} }
} }
@ -322,7 +321,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
&NewElts[0], NewElts.size()); &NewElts[0], NewElts.size());
// Convert the new vector to the old vector type. // Convert the new vector to the old vector type.
return DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, NewVec); return DAG.getNode(ISD::BITCAST, dl, VecVT, NewVec);
} }
SDValue DAGTypeLegalizer::ExpandOp_EXTRACT_ELEMENT(SDNode *N) { SDValue DAGTypeLegalizer::ExpandOp_EXTRACT_ELEMENT(SDNode *N) {
@ -347,7 +346,7 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
// Bitconvert to a vector of twice the length with elements of the expanded // Bitconvert to a vector of twice the length with elements of the expanded
// type, insert the expanded vector elements, and then convert back. // type, insert the expanded vector elements, and then convert back.
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewEVT, NumElts*2); EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewEVT, NumElts*2);
SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl, SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
NewVecVT, N->getOperand(0)); NewVecVT, N->getOperand(0));
SDValue Lo, Hi; SDValue Lo, Hi;
@ -363,7 +362,7 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, NewVec, Hi, Idx); NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, NewVec, Hi, Idx);
// Convert the new vector to the old vector type. // Convert the new vector to the old vector type.
return DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, NewVec); return DAG.getNode(ISD::BITCAST, dl, VecVT, NewVec);
} }
SDValue DAGTypeLegalizer::ExpandOp_SCALAR_TO_VECTOR(SDNode *N) { SDValue DAGTypeLegalizer::ExpandOp_SCALAR_TO_VECTOR(SDNode *N) {

View File

@ -241,14 +241,14 @@ SDValue VectorLegalizer::PromoteVectorOp(SDValue Op) {
for (unsigned j = 0; j != Op.getNumOperands(); ++j) { for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
if (Op.getOperand(j).getValueType().isVector()) if (Op.getOperand(j).getValueType().isVector())
Operands[j] = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Op.getOperand(j)); Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
else else
Operands[j] = Op.getOperand(j); Operands[j] = Op.getOperand(j);
} }
Op = DAG.getNode(Op.getOpcode(), dl, NVT, &Operands[0], Operands.size()); Op = DAG.getNode(Op.getOpcode(), dl, NVT, &Operands[0], Operands.size());
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Op); return DAG.getNode(ISD::BITCAST, dl, VT, Op);
} }
SDValue VectorLegalizer::ExpandFNEG(SDValue Op) { SDValue VectorLegalizer::ExpandFNEG(SDValue Op) {

View File

@ -46,7 +46,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
#endif #endif
llvm_unreachable("Do not know how to scalarize the result of this operator!"); llvm_unreachable("Do not know how to scalarize the result of this operator!");
case ISD::BIT_CONVERT: R = ScalarizeVecRes_BIT_CONVERT(N); break; case ISD::BITCAST: R = ScalarizeVecRes_BITCAST(N); break;
case ISD::BUILD_VECTOR: R = N->getOperand(0); break; case ISD::BUILD_VECTOR: R = N->getOperand(0); break;
case ISD::CONVERT_RNDSAT: R = ScalarizeVecRes_CONVERT_RNDSAT(N); break; case ISD::CONVERT_RNDSAT: R = ScalarizeVecRes_CONVERT_RNDSAT(N); break;
case ISD::EXTRACT_SUBVECTOR: R = ScalarizeVecRes_EXTRACT_SUBVECTOR(N); break; case ISD::EXTRACT_SUBVECTOR: R = ScalarizeVecRes_EXTRACT_SUBVECTOR(N); break;
@ -122,9 +122,9 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) {
LHS.getValueType(), LHS, RHS); LHS.getValueType(), LHS, RHS);
} }
SDValue DAGTypeLegalizer::ScalarizeVecRes_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::ScalarizeVecRes_BITCAST(SDNode *N) {
EVT NewVT = N->getValueType(0).getVectorElementType(); EVT NewVT = N->getValueType(0).getVectorElementType();
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
NewVT, N->getOperand(0)); NewVT, N->getOperand(0));
} }
@ -296,8 +296,8 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
dbgs() << "\n"; dbgs() << "\n";
#endif #endif
llvm_unreachable("Do not know how to scalarize this operator's operand!"); llvm_unreachable("Do not know how to scalarize this operator's operand!");
case ISD::BIT_CONVERT: case ISD::BITCAST:
Res = ScalarizeVecOp_BIT_CONVERT(N); Res = ScalarizeVecOp_BITCAST(N);
break; break;
case ISD::CONCAT_VECTORS: case ISD::CONCAT_VECTORS:
Res = ScalarizeVecOp_CONCAT_VECTORS(N); Res = ScalarizeVecOp_CONCAT_VECTORS(N);
@ -326,11 +326,11 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
return false; return false;
} }
/// ScalarizeVecOp_BIT_CONVERT - If the value to convert is a vector that needs /// ScalarizeVecOp_BITCAST - If the value to convert is a vector that needs
/// to be scalarized, it must be <1 x ty>. Convert the element instead. /// to be scalarized, it must be <1 x ty>. Convert the element instead.
SDValue DAGTypeLegalizer::ScalarizeVecOp_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::ScalarizeVecOp_BITCAST(SDNode *N) {
SDValue Elt = GetScalarizedVector(N->getOperand(0)); SDValue Elt = GetScalarizedVector(N->getOperand(0));
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
N->getValueType(0), Elt); N->getValueType(0), Elt);
} }
@ -406,7 +406,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break; case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break; case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
case ISD::BIT_CONVERT: SplitVecRes_BIT_CONVERT(N, Lo, Hi); break; case ISD::BITCAST: SplitVecRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_VECTOR: SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break; case ISD::BUILD_VECTOR: SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break;
case ISD::CONCAT_VECTORS: SplitVecRes_CONCAT_VECTORS(N, Lo, Hi); break; case ISD::CONCAT_VECTORS: SplitVecRes_CONCAT_VECTORS(N, Lo, Hi); break;
case ISD::CONVERT_RNDSAT: SplitVecRes_CONVERT_RNDSAT(N, Lo, Hi); break; case ISD::CONVERT_RNDSAT: SplitVecRes_CONVERT_RNDSAT(N, Lo, Hi); break;
@ -496,8 +496,8 @@ void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo,
Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi, RHSHi); Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi, RHSHi);
} }
void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo, void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
SDValue &Hi) { SDValue &Hi) {
// We know the result is a vector. The input may be either a vector or a // We know the result is a vector. The input may be either a vector or a
// scalar value. // scalar value.
EVT LoVT, HiVT; EVT LoVT, HiVT;
@ -525,8 +525,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
GetExpandedOp(InOp, Lo, Hi); GetExpandedOp(InOp, Lo, Hi);
if (TLI.isBigEndian()) if (TLI.isBigEndian())
std::swap(Lo, Hi); std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, LoVT, Lo); Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HiVT, Hi); Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
return; return;
} }
break; break;
@ -534,8 +534,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
// If the input is a vector that needs to be split, convert each split // If the input is a vector that needs to be split, convert each split
// piece of the input now. // piece of the input now.
GetSplitVector(InOp, Lo, Hi); GetSplitVector(InOp, Lo, Hi);
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, LoVT, Lo); Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HiVT, Hi); Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
return; return;
} }
@ -549,8 +549,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
if (TLI.isBigEndian()) if (TLI.isBigEndian())
std::swap(Lo, Hi); std::swap(Lo, Hi);
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, LoVT, Lo); Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HiVT, Hi); Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
} }
void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
@ -978,7 +978,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
#endif #endif
llvm_unreachable("Do not know how to split this operator's operand!"); llvm_unreachable("Do not know how to split this operator's operand!");
case ISD::BIT_CONVERT: Res = SplitVecOp_BIT_CONVERT(N); break; case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break; case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break; case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break; case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break;
@ -1034,8 +1034,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi); return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
} }
SDValue DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) {
// For example, i64 = BIT_CONVERT v4i16 on alpha. Typically the vector will // For example, i64 = BITCAST v4i16 on alpha. Typically the vector will
// end up being split all the way down to individual components. Convert the // end up being split all the way down to individual components. Convert the
// split pieces into integers and reassemble. // split pieces into integers and reassemble.
SDValue Lo, Hi; SDValue Lo, Hi;
@ -1046,7 +1046,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) {
if (TLI.isBigEndian()) if (TLI.isBigEndian())
std::swap(Lo, Hi); std::swap(Lo, Hi);
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), N->getValueType(0), return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getValueType(0),
JoinIntegers(Lo, Hi)); JoinIntegers(Lo, Hi));
} }
@ -1151,7 +1151,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) { SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
DebugLoc DL = N->getDebugLoc(); DebugLoc DL = N->getDebugLoc();
// The input operands all must have the same type, and we know the result the // The input operands all must have the same type, and we know the result the
// result type is valid. Convert this to a buildvector which extracts all the // result type is valid. Convert this to a buildvector which extracts all the
// input elements. // input elements.
@ -1168,7 +1168,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
} }
} }
return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0), return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0),
&Elts[0], Elts.size()); &Elts[0], Elts.size());
} }
@ -1197,7 +1197,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
#endif #endif
llvm_unreachable("Do not know how to widen the result of this operator!"); llvm_unreachable("Do not know how to widen the result of this operator!");
case ISD::BIT_CONVERT: Res = WidenVecRes_BIT_CONVERT(N); break; case ISD::BITCAST: Res = WidenVecRes_BITCAST(N); break;
case ISD::BUILD_VECTOR: Res = WidenVecRes_BUILD_VECTOR(N); break; case ISD::BUILD_VECTOR: Res = WidenVecRes_BUILD_VECTOR(N); break;
case ISD::CONCAT_VECTORS: Res = WidenVecRes_CONCAT_VECTORS(N); break; case ISD::CONCAT_VECTORS: Res = WidenVecRes_CONCAT_VECTORS(N); break;
case ISD::CONVERT_RNDSAT: Res = WidenVecRes_CONVERT_RNDSAT(N); break; case ISD::CONVERT_RNDSAT: Res = WidenVecRes_CONVERT_RNDSAT(N); break;
@ -1304,11 +1304,11 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
SDValue InOp2 = GetWidenedVector(N->getOperand(1)); SDValue InOp2 = GetWidenedVector(N->getOperand(1));
return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2); return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2);
} }
// No legal vector version so unroll the vector operation and then widen. // No legal vector version so unroll the vector operation and then widen.
if (NumElts == 1) if (NumElts == 1)
return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements()); return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());
// Since the operation can trap, apply operation on the original vector. // Since the operation can trap, apply operation on the original vector.
EVT MaxVT = VT; EVT MaxVT = VT;
SDValue InOp1 = GetWidenedVector(N->getOperand(0)); SDValue InOp1 = GetWidenedVector(N->getOperand(0));
@ -1341,9 +1341,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
if (NumElts == 1) { if (NumElts == 1) {
for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) { for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT, SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
InOp1, DAG.getIntPtrConstant(Idx)); InOp1, DAG.getIntPtrConstant(Idx));
SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT, SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
InOp2, DAG.getIntPtrConstant(Idx)); InOp2, DAG.getIntPtrConstant(Idx));
ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT, ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
EOp1, EOp2); EOp1, EOp2);
@ -1411,7 +1411,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
if (VT == WidenVT) if (VT == WidenVT)
return ConcatOps[0]; return ConcatOps[0];
} }
// add undefs of size MaxVT until ConcatOps grows to length of WidenVT // add undefs of size MaxVT until ConcatOps grows to length of WidenVT
unsigned NumOps = WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements(); unsigned NumOps = WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements();
if (NumOps != ConcatEnd ) { if (NumOps != ConcatEnd ) {
@ -1532,7 +1532,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_InregOp(SDNode *N) {
WidenVT, WidenLHS, DAG.getValueType(ExtVT)); WidenVT, WidenLHS, DAG.getValueType(ExtVT));
} }
SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) {
SDValue InOp = N->getOperand(0); SDValue InOp = N->getOperand(0);
EVT InVT = InOp.getValueType(); EVT InVT = InOp.getValueType();
EVT VT = N->getValueType(0); EVT VT = N->getValueType(0);
@ -1551,7 +1551,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
InOp = GetPromotedInteger(InOp); InOp = GetPromotedInteger(InOp);
InVT = InOp.getValueType(); InVT = InOp.getValueType();
if (WidenVT.bitsEq(InVT)) if (WidenVT.bitsEq(InVT))
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, InOp); return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp);
break; break;
case SoftenFloat: case SoftenFloat:
case ExpandInteger: case ExpandInteger:
@ -1566,7 +1566,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
InVT = InOp.getValueType(); InVT = InOp.getValueType();
if (WidenVT.bitsEq(InVT)) if (WidenVT.bitsEq(InVT))
// The input widens to the same size. Convert to the widen value. // The input widens to the same size. Convert to the widen value.
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, InOp); return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp);
break; break;
} }
@ -1606,7 +1606,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
else else
NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
NewInVT, &Ops[0], NewNumElts); NewInVT, &Ops[0], NewNumElts);
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, NewVec); return DAG.getNode(ISD::BITCAST, dl, WidenVT, NewVec);
} }
} }
@ -1982,7 +1982,7 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned ResNo) {
#endif #endif
llvm_unreachable("Do not know how to widen this operator's operand!"); llvm_unreachable("Do not know how to widen this operator's operand!");
case ISD::BIT_CONVERT: Res = WidenVecOp_BIT_CONVERT(N); break; case ISD::BITCAST: Res = WidenVecOp_BITCAST(N); break;
case ISD::CONCAT_VECTORS: Res = WidenVecOp_CONCAT_VECTORS(N); break; case ISD::CONCAT_VECTORS: Res = WidenVecOp_CONCAT_VECTORS(N); break;
case ISD::EXTRACT_SUBVECTOR: Res = WidenVecOp_EXTRACT_SUBVECTOR(N); break; case ISD::EXTRACT_SUBVECTOR: Res = WidenVecOp_EXTRACT_SUBVECTOR(N); break;
case ISD::EXTRACT_VECTOR_ELT: Res = WidenVecOp_EXTRACT_VECTOR_ELT(N); break; case ISD::EXTRACT_VECTOR_ELT: Res = WidenVecOp_EXTRACT_VECTOR_ELT(N); break;
@ -2041,7 +2041,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts); return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
} }
SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) { SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
EVT VT = N->getValueType(0); EVT VT = N->getValueType(0);
SDValue InOp = GetWidenedVector(N->getOperand(0)); SDValue InOp = GetWidenedVector(N->getOperand(0));
EVT InWidenVT = InOp.getValueType(); EVT InWidenVT = InOp.getValueType();
@ -2055,7 +2055,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
unsigned NewNumElts = InWidenSize / Size; unsigned NewNumElts = InWidenSize / Size;
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts); EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
if (TLI.isTypeLegal(NewVT)) { if (TLI.isTypeLegal(NewVT)) {
SDValue BitOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, InOp); SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp, return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
DAG.getIntPtrConstant(0)); DAG.getIntPtrConstant(0));
} }
@ -2144,7 +2144,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
if (Width == WidenEltWidth) if (Width == WidenEltWidth)
return RetVT; return RetVT;
// See if there is larger legal integer than the element type to load/store // See if there is larger legal integer than the element type to load/store
unsigned VT; unsigned VT;
for (VT = (unsigned)MVT::LAST_INTEGER_VALUETYPE; for (VT = (unsigned)MVT::LAST_INTEGER_VALUETYPE;
VT >= (unsigned)MVT::FIRST_INTEGER_VALUETYPE; --VT) { VT >= (unsigned)MVT::FIRST_INTEGER_VALUETYPE; --VT) {
@ -2199,7 +2199,7 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
if (NewLdTy != LdTy) { if (NewLdTy != LdTy) {
NumElts = Width / NewLdTy.getSizeInBits(); NumElts = Width / NewLdTy.getSizeInBits();
NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewLdTy, NumElts); NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewLdTy, NumElts);
VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVecVT, VecOp); VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, VecOp);
// Readjust position and vector position based on new load type // Readjust position and vector position based on new load type
Idx = Idx * LdTy.getSizeInBits() / NewLdTy.getSizeInBits(); Idx = Idx * LdTy.getSizeInBits() / NewLdTy.getSizeInBits();
LdTy = NewLdTy; LdTy = NewLdTy;
@ -2207,7 +2207,7 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, VecOp, LdOps[i], VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, VecOp, LdOps[i],
DAG.getIntPtrConstant(Idx++)); DAG.getIntPtrConstant(Idx++));
} }
return DAG.getNode(ISD::BIT_CONVERT, dl, VecTy, VecOp); return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp);
} }
SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain, SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
@ -2247,7 +2247,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
unsigned NumElts = WidenWidth / NewVTWidth; unsigned NumElts = WidenWidth / NewVTWidth;
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts); EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp); SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, VecOp); return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp);
} }
if (NewVT == WidenVT) if (NewVT == WidenVT)
return LdOp; return LdOp;
@ -2297,7 +2297,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
if (!LdOps[0].getValueType().isVector()) if (!LdOps[0].getValueType().isVector())
// All the loads are scalar loads. // All the loads are scalar loads.
return BuildVectorFromScalar(DAG, WidenVT, LdOps, 0, End); return BuildVectorFromScalar(DAG, WidenVT, LdOps, 0, End);
// If the load contains vectors, build the vector using concat vector. // If the load contains vectors, build the vector using concat vector.
// All of the vectors used to loads are power of 2 and the scalars load // All of the vectors used to loads are power of 2 and the scalars load
// can be combined to make a power of 2 vector. // can be combined to make a power of 2 vector.
@ -2441,7 +2441,7 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
// Cast the vector to the scalar type we can store // Cast the vector to the scalar type we can store
unsigned NumElts = ValWidth / NewVTWidth; unsigned NumElts = ValWidth / NewVTWidth;
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts); EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
SDValue VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVecVT, ValOp); SDValue VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, ValOp);
// Readjust index position based on new vector type // Readjust index position based on new vector type
Idx = Idx * ValEltWidth / NewVTWidth; Idx = Idx * ValEltWidth / NewVTWidth;
do { do {
@ -2474,7 +2474,7 @@ DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
bool isNonTemporal = ST->isNonTemporal(); bool isNonTemporal = ST->isNonTemporal();
SDValue ValOp = GetWidenedVector(ST->getValue()); SDValue ValOp = GetWidenedVector(ST->getValue());
DebugLoc dl = ST->getDebugLoc(); DebugLoc dl = ST->getDebugLoc();
EVT StVT = ST->getMemoryVT(); EVT StVT = ST->getMemoryVT();
EVT ValVT = ValOp.getValueType(); EVT ValVT = ValOp.getValueType();

View File

@ -111,7 +111,7 @@ bool ConstantFPSDNode::isValueValidForType(EVT VT,
/// BUILD_VECTOR where all of the elements are ~0 or undef. /// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) { bool ISD::isBuildVectorAllOnes(const SDNode *N) {
// Look through a bit convert. // Look through a bit convert.
if (N->getOpcode() == ISD::BIT_CONVERT) if (N->getOpcode() == ISD::BITCAST)
N = N->getOperand(0).getNode(); N = N->getOperand(0).getNode();
if (N->getOpcode() != ISD::BUILD_VECTOR) return false; if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
@ -152,7 +152,7 @@ bool ISD::isBuildVectorAllOnes(const SDNode *N) {
/// BUILD_VECTOR where all of the elements are 0 or undef. /// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) { bool ISD::isBuildVectorAllZeros(const SDNode *N) {
// Look through a bit convert. // Look through a bit convert.
if (N->getOpcode() == ISD::BIT_CONVERT) if (N->getOpcode() == ISD::BITCAST)
N = N->getOperand(0).getNode(); N = N->getOperand(0).getNode();
if (N->getOpcode() != ISD::BUILD_VECTOR) return false; if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
@ -1356,7 +1356,7 @@ SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
void *IP = 0; void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0); return SDValue(E, 0);
SDNode *N = new (NodeAllocator) EHLabelSDNode(dl, Root, Label); SDNode *N = new (NodeAllocator) EHLabelSDNode(dl, Root, Label);
CSEMap.InsertNode(N, IP); CSEMap.InsertNode(N, IP);
AllNodes.push_back(N); AllNodes.push_back(N);
@ -1406,11 +1406,11 @@ SDValue SelectionDAG::getMDNode(const MDNode *MD) {
FoldingSetNodeID ID; FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0); AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
ID.AddPointer(MD); ID.AddPointer(MD);
void *IP = 0; void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
return SDValue(E, 0); return SDValue(E, 0);
SDNode *N = new (NodeAllocator) MDNodeSDNode(MD); SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
CSEMap.InsertNode(N, IP); CSEMap.InsertNode(N, IP);
AllNodes.push_back(N); AllNodes.push_back(N);
@ -2365,7 +2365,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
APFloat::rmNearestTiesToEven); APFloat::rmNearestTiesToEven);
return getConstantFP(apf, VT); return getConstantFP(apf, VT);
} }
case ISD::BIT_CONVERT: case ISD::BITCAST:
if (VT == MVT::f32 && C->getValueType(0) == MVT::i32) if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
return getConstantFP(Val.bitsToFloat(), VT); return getConstantFP(Val.bitsToFloat(), VT);
else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64) else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
@ -2416,7 +2416,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
APInt api(VT.getSizeInBits(), 2, x); APInt api(VT.getSizeInBits(), 2, x);
return getConstant(api, VT); return getConstant(api, VT);
} }
case ISD::BIT_CONVERT: case ISD::BITCAST:
if (VT == MVT::i32 && C->getValueType(0) == MVT::f32) if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT); return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64) else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
@ -2518,13 +2518,13 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
return Operand.getNode()->getOperand(0); return Operand.getNode()->getOperand(0);
} }
break; break;
case ISD::BIT_CONVERT: case ISD::BITCAST:
// Basic sanity checking. // Basic sanity checking.
assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits() assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
&& "Cannot BIT_CONVERT between types of different sizes!"); && "Cannot BITCAST between types of different sizes!");
if (VT == Operand.getValueType()) return Operand; // noop conversion. if (VT == Operand.getValueType()) return Operand; // noop conversion.
if (OpOpcode == ISD::BIT_CONVERT) // bitconv(bitconv(x)) -> bitconv(x) if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
return getNode(ISD::BIT_CONVERT, DL, VT, Operand.getOperand(0)); return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
if (OpOpcode == ISD::UNDEF) if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT); return getUNDEF(VT);
break; break;
@ -3060,7 +3060,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
case ISD::VECTOR_SHUFFLE: case ISD::VECTOR_SHUFFLE:
llvm_unreachable("should use getVectorShuffle constructor!"); llvm_unreachable("should use getVectorShuffle constructor!");
break; break;
case ISD::BIT_CONVERT: case ISD::BITCAST:
// Fold bit_convert nodes from a type to themselves. // Fold bit_convert nodes from a type to themselves.
if (N1.getValueType() == VT) if (N1.getValueType() == VT)
return N1; return N1;
@ -3177,7 +3177,7 @@ static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
else if (VT.isVector()) { else if (VT.isVector()) {
unsigned NumElts = VT.getVectorNumElements(); unsigned NumElts = VT.getVectorNumElements();
MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64; MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(), DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
EltVT, NumElts))); EltVT, NumElts)));
} else } else
@ -3274,7 +3274,7 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
if (VT.bitsGT(LVT)) if (VT.bitsGT(LVT))
VT = LVT; VT = LVT;
} }
// If we're optimizing for size, and there is a limit, bump the maximum number // If we're optimizing for size, and there is a limit, bump the maximum number
// of operations inserted down to 4. This is a wild guess that approximates // of operations inserted down to 4. This is a wild guess that approximates
// the size of a call to memcpy or memset (3 arguments + call). // the size of a call to memcpy or memset (3 arguments + call).
@ -3340,7 +3340,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
bool CopyFromStr = isMemSrcFromString(Src, Str); bool CopyFromStr = isMemSrcFromString(Src, Str);
bool isZeroStr = CopyFromStr && Str.empty(); bool isZeroStr = CopyFromStr && Str.empty();
unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(); unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy();
if (!FindOptimalMemOpLowering(MemOps, Limit, Size, if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align), (DstAlignCanChange ? 0 : Align),
(isZeroStr ? 0 : SrcAlign), (isZeroStr ? 0 : SrcAlign),
@ -3682,7 +3682,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
if (Result.getNode()) if (Result.getNode())
return Result; return Result;
// Emit a library call. // Emit a library call.
const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext()); const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args; TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry; TargetLowering::ArgListEntry Entry;
@ -3912,7 +3912,7 @@ static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
!isa<ConstantSDNode>(Ptr.getOperand(1)) || !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
!isa<FrameIndexSDNode>(Ptr.getOperand(0))) !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
return MachinePointerInfo(); return MachinePointerInfo();
int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex(); int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
return MachinePointerInfo::getFixedStack(FI, Offset+ return MachinePointerInfo::getFixedStack(FI, Offset+
cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue()); cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
@ -3930,7 +3930,7 @@ static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
return InferPointerInfo(Ptr); return InferPointerInfo(Ptr);
return MachinePointerInfo(); return MachinePointerInfo();
} }
SDValue SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
@ -3947,12 +3947,12 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
Flags |= MachineMemOperand::MOVolatile; Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal) if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal; Flags |= MachineMemOperand::MONonTemporal;
// If we don't have a PtrInfo, infer the trivial frame index case to simplify // If we don't have a PtrInfo, infer the trivial frame index case to simplify
// clients. // clients.
if (PtrInfo.V == 0) if (PtrInfo.V == 0)
PtrInfo = InferPointerInfo(Ptr, Offset); PtrInfo = InferPointerInfo(Ptr, Offset);
MachineFunction &MF = getMachineFunction(); MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO = MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment, MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
@ -3961,7 +3961,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
} }
SDValue SDValue
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType, SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
EVT VT, DebugLoc dl, SDValue Chain, EVT VT, DebugLoc dl, SDValue Chain,
SDValue Ptr, SDValue Offset, EVT MemVT, SDValue Ptr, SDValue Offset, EVT MemVT,
MachineMemOperand *MMO) { MachineMemOperand *MMO) {
@ -4052,7 +4052,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
Flags |= MachineMemOperand::MOVolatile; Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal) if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal; Flags |= MachineMemOperand::MONonTemporal;
if (PtrInfo.V == 0) if (PtrInfo.V == 0)
PtrInfo = InferPointerInfo(Ptr); PtrInfo = InferPointerInfo(Ptr);
@ -4101,7 +4101,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
Flags |= MachineMemOperand::MOVolatile; Flags |= MachineMemOperand::MOVolatile;
if (isNonTemporal) if (isNonTemporal)
Flags |= MachineMemOperand::MONonTemporal; Flags |= MachineMemOperand::MONonTemporal;
if (PtrInfo.V == 0) if (PtrInfo.V == 0)
PtrInfo = InferPointerInfo(Ptr); PtrInfo = InferPointerInfo(Ptr);
@ -5431,7 +5431,7 @@ MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
} }
MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
const SDValue *Ops, unsigned NumOps, EVT memvt, const SDValue *Ops, unsigned NumOps, EVT memvt,
MachineMemOperand *mmo) MachineMemOperand *mmo)
: SDNode(Opc, dl, VTs, Ops, NumOps), : SDNode(Opc, dl, VTs, Ops, NumOps),
MemoryVT(memvt), MMO(mmo) { MemoryVT(memvt), MMO(mmo) {
@ -5450,7 +5450,7 @@ void SDNode::Profile(FoldingSetNodeID &ID) const {
namespace { namespace {
struct EVTArray { struct EVTArray {
std::vector<EVT> VTs; std::vector<EVT> VTs;
EVTArray() { EVTArray() {
VTs.reserve(MVT::LAST_VALUETYPE); VTs.reserve(MVT::LAST_VALUETYPE);
for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i) for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
@ -5542,8 +5542,8 @@ bool SDNode::isOperandOf(SDNode *N) const {
/// reachesChainWithoutSideEffects - Return true if this operand (which must /// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any /// be a chain) reaches the specified operand without crossing any
/// side-effecting instructions on any chain path. In practice, this looks /// side-effecting instructions on any chain path. In practice, this looks
/// through token factors and non-volatile loads. In order to remain efficient, /// through token factors and non-volatile loads. In order to remain efficient,
/// this only looks a couple of nodes in, it does not do an exhaustive search. /// this only looks a couple of nodes in, it does not do an exhaustive search.
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest, bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
unsigned Depth) const { unsigned Depth) const {
@ -5788,7 +5788,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::UINT_TO_FP: return "uint_to_fp"; case ISD::UINT_TO_FP: return "uint_to_fp";
case ISD::FP_TO_SINT: return "fp_to_sint"; case ISD::FP_TO_SINT: return "fp_to_sint";
case ISD::FP_TO_UINT: return "fp_to_uint"; case ISD::FP_TO_UINT: return "fp_to_uint";
case ISD::BIT_CONVERT: return "bit_convert"; case ISD::BITCAST: return "bit_convert";
case ISD::FP16_TO_FP32: return "fp16_to_fp32"; case ISD::FP16_TO_FP32: return "fp16_to_fp32";
case ISD::FP32_TO_FP16: return "fp32_to_fp16"; case ISD::FP32_TO_FP16: return "fp32_to_fp16";
@ -6051,7 +6051,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
const char *AM = getIndexedModeName(ST->getAddressingMode()); const char *AM = getIndexedModeName(ST->getAddressingMode());
if (*AM) if (*AM)
OS << ", " << AM; OS << ", " << AM;
OS << ">"; OS << ">";
} else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) { } else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
OS << "<" << *M->getMemOperand() << ">"; OS << "<" << *M->getMemOperand() << ">";
@ -6102,7 +6102,7 @@ void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N, static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
const SelectionDAG *G, unsigned depth, const SelectionDAG *G, unsigned depth,
unsigned indent) unsigned indent)
{ {
if (depth == 0) if (depth == 0)
return; return;
@ -6123,7 +6123,7 @@ static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
void SDNode::printrWithDepth(raw_ostream &OS, const SelectionDAG *G, void SDNode::printrWithDepth(raw_ostream &OS, const SelectionDAG *G,
unsigned depth) const { unsigned depth) const {
printrWithDepthHelper(OS, this, G, depth, 0); printrWithDepthHelper(OS, this, G, depth, 0);
} }
void SDNode::printrFull(raw_ostream &OS, const SelectionDAG *G) const { void SDNode::printrFull(raw_ostream &OS, const SelectionDAG *G) const {
// Don't print impossibly deep things. // Don't print impossibly deep things.
@ -6137,7 +6137,7 @@ void SDNode::dumprWithDepth(const SelectionDAG *G, unsigned depth) const {
void SDNode::dumprFull(const SelectionDAG *G) const { void SDNode::dumprFull(const SelectionDAG *G) const {
// Don't print impossibly deep things. // Don't print impossibly deep things.
dumprWithDepth(G, 100); dumprWithDepth(G, 100);
} }
static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) { static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
@ -6221,10 +6221,10 @@ SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
} }
/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a /// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
/// location that is 'Dist' units away from the location that the 'Base' load /// location that is 'Dist' units away from the location that the 'Base' load
/// is loading from. /// is loading from.
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base, bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
unsigned Bytes, int Dist) const { unsigned Bytes, int Dist) const {
if (LD->getChain() != Base->getChain()) if (LD->getChain() != Base->getChain())
return false; return false;
@ -6477,7 +6477,7 @@ static void checkForCyclesHelper(const SDNode *N,
// If this node has already been checked, don't check it again. // If this node has already been checked, don't check it again.
if (Checked.count(N)) if (Checked.count(N))
return; return;
// If a node has already been visited on this depth-first walk, reject it as // If a node has already been visited on this depth-first walk, reject it as
// a cycle. // a cycle.
if (!Visited.insert(N)) { if (!Visited.insert(N)) {
@ -6486,10 +6486,10 @@ static void checkForCyclesHelper(const SDNode *N,
errs() << "Detected cycle in SelectionDAG\n"; errs() << "Detected cycle in SelectionDAG\n";
abort(); abort();
} }
for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i) for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked); checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
Checked.insert(N); Checked.insert(N);
Visited.erase(N); Visited.erase(N);
} }

View File

@ -131,8 +131,8 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
RoundParts / 2, PartVT, HalfVT); RoundParts / 2, PartVT, HalfVT);
} else { } else {
Lo = DAG.getNode(ISD::BIT_CONVERT, DL, HalfVT, Parts[0]); Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
Hi = DAG.getNode(ISD::BIT_CONVERT, DL, HalfVT, Parts[1]); Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
} }
if (TLI.isBigEndian()) if (TLI.isBigEndian())
@ -164,8 +164,8 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) && assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
"Unexpected split"); "Unexpected split");
SDValue Lo, Hi; SDValue Lo, Hi;
Lo = DAG.getNode(ISD::BIT_CONVERT, DL, EVT(MVT::f64), Parts[0]); Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
Hi = DAG.getNode(ISD::BIT_CONVERT, DL, EVT(MVT::f64), Parts[1]); Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
if (TLI.isBigEndian()) if (TLI.isBigEndian())
std::swap(Lo, Hi); std::swap(Lo, Hi);
Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi); Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
@ -207,7 +207,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
} }
if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
return DAG.getNode(ISD::BIT_CONVERT, DL, ValueVT, Val); return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
llvm_unreachable("Unknown mismatch!"); llvm_unreachable("Unknown mismatch!");
return SDValue(); return SDValue();
@ -284,7 +284,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
} }
// Vector/Vector bitcast. // Vector/Vector bitcast.
return DAG.getNode(ISD::BIT_CONVERT, DL, ValueVT, Val); return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
} }
assert(ValueVT.getVectorElementType() == PartVT && assert(ValueVT.getVectorElementType() == PartVT &&
@ -342,7 +342,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
} else if (PartBits == ValueVT.getSizeInBits()) { } else if (PartBits == ValueVT.getSizeInBits()) {
// Different types of the same size. // Different types of the same size.
assert(NumParts == 1 && PartVT != ValueVT); assert(NumParts == 1 && PartVT != ValueVT);
Val = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Val); Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
} else if (NumParts * PartBits < ValueVT.getSizeInBits()) { } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
// If the parts cover less bits than value has, truncate the value. // If the parts cover less bits than value has, truncate the value.
assert(PartVT.isInteger() && ValueVT.isInteger() && assert(PartVT.isInteger() && ValueVT.isInteger() &&
@ -385,7 +385,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
// The number of parts is a power of 2. Repeatedly bisect the value using // The number of parts is a power of 2. Repeatedly bisect the value using
// EXTRACT_ELEMENT. // EXTRACT_ELEMENT.
Parts[0] = DAG.getNode(ISD::BIT_CONVERT, DL, Parts[0] = DAG.getNode(ISD::BITCAST, DL,
EVT::getIntegerVT(*DAG.getContext(), EVT::getIntegerVT(*DAG.getContext(),
ValueVT.getSizeInBits()), ValueVT.getSizeInBits()),
Val); Val);
@ -403,8 +403,8 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
ThisVT, Part0, DAG.getIntPtrConstant(0)); ThisVT, Part0, DAG.getIntPtrConstant(0));
if (ThisBits == PartBits && ThisVT != PartVT) { if (ThisBits == PartBits && ThisVT != PartVT) {
Part0 = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Part0); Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
Part1 = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Part1); Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
} }
} }
} }
@ -428,7 +428,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
// Nothing to do. // Nothing to do.
} else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) { } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
// Bitconvert vector->vector case. // Bitconvert vector->vector case.
Val = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Val); Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
} else if (PartVT.isVector() && } else if (PartVT.isVector() &&
PartVT.getVectorElementType() == ValueVT.getVectorElementType()&& PartVT.getVectorElementType() == ValueVT.getVectorElementType()&&
PartVT.getVectorNumElements() > ValueVT.getVectorNumElements()) { PartVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
@ -2579,9 +2579,9 @@ void SelectionDAGBuilder::visitBitCast(const User &I) {
EVT DestVT = TLI.getValueType(I.getType()); EVT DestVT = TLI.getValueType(I.getType());
// BitCast assures us that source and destination are the same size so this is // BitCast assures us that source and destination are the same size so this is
// either a BIT_CONVERT or a no-op. // either a BITCAST or a no-op.
if (DestVT != N.getValueType()) if (DestVT != N.getValueType())
setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), setValue(&I, DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
DestVT, N)); // convert types. DestVT, N)); // convert types.
else else
setValue(&I, N); // noop cast. setValue(&I, N); // noop cast.
@ -3021,7 +3021,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
// Do not serialize non-volatile loads against each other. // Do not serialize non-volatile loads against each other.
Root = DAG.getRoot(); Root = DAG.getRoot();
} }
SmallVector<SDValue, 4> Values(NumValues); SmallVector<SDValue, 4> Values(NumValues);
SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains), SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
NumValues)); NumValues));
@ -3198,7 +3198,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
if (!I.getType()->isVoidTy()) { if (!I.getType()->isVoidTy()) {
if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) { if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
EVT VT = TLI.getValueType(PTy); EVT VT = TLI.getValueType(PTy);
Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result); Result = DAG.getNode(ISD::BITCAST, getCurDebugLoc(), VT, Result);
} }
setValue(&I, Result); setValue(&I, Result);
@ -3217,7 +3217,7 @@ GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
DAG.getConstant(0x007fffff, MVT::i32)); DAG.getConstant(0x007fffff, MVT::i32));
SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1, SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
DAG.getConstant(0x3f800000, MVT::i32)); DAG.getConstant(0x3f800000, MVT::i32));
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2); return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
} }
/// GetExponent - Get the exponent: /// GetExponent - Get the exponent:
@ -3316,13 +3316,13 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f7f5e7e)); getF32Constant(DAG, 0x3f7f5e7e));
SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5); SDValue TwoToFracPartOfX = DAG.getNode(ISD::BITCAST, dl,MVT::i32, t5);
// Add the exponent into the result in integer domain. // Add the exponent into the result in integer domain.
SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32, SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
TwoToFracPartOfX, IntegerPartOfX); TwoToFracPartOfX, IntegerPartOfX);
result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6); result = DAG.getNode(ISD::BITCAST, dl, MVT::f32, t6);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) { } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12: // For floating-point precision of 12:
// //
@ -3342,13 +3342,13 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3f7ff8fd)); getF32Constant(DAG, 0x3f7ff8fd));
SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7); SDValue TwoToFracPartOfX = DAG.getNode(ISD::BITCAST, dl,MVT::i32, t7);
// Add the exponent into the result in integer domain. // Add the exponent into the result in integer domain.
SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32, SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
TwoToFracPartOfX, IntegerPartOfX); TwoToFracPartOfX, IntegerPartOfX);
result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8); result = DAG.getNode(ISD::BITCAST, dl, MVT::f32, t8);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18: // For floating-point precision of 18:
// //
@ -3380,14 +3380,14 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X); SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12, SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
getF32Constant(DAG, 0x3f800000)); getF32Constant(DAG, 0x3f800000));
SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl, SDValue TwoToFracPartOfX = DAG.getNode(ISD::BITCAST, dl,
MVT::i32, t13); MVT::i32, t13);
// Add the exponent into the result in integer domain. // Add the exponent into the result in integer domain.
SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32, SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
TwoToFracPartOfX, IntegerPartOfX); TwoToFracPartOfX, IntegerPartOfX);
result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14); result = DAG.getNode(ISD::BITCAST, dl, MVT::f32, t14);
} }
} else { } else {
// No special expansion. // No special expansion.
@ -3409,7 +3409,7 @@ SelectionDAGBuilder::visitLog(const CallInst &I) {
if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 && if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op = getValue(I.getArgOperand(0)); SDValue Op = getValue(I.getArgOperand(0));
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
// Scale the exponent by log(2) [0.69314718f]. // Scale the exponent by log(2) [0.69314718f].
SDValue Exp = GetExponent(DAG, Op1, TLI, dl); SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
@ -3519,7 +3519,7 @@ SelectionDAGBuilder::visitLog2(const CallInst &I) {
if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 && if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op = getValue(I.getArgOperand(0)); SDValue Op = getValue(I.getArgOperand(0));
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
// Get the exponent. // Get the exponent.
SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl); SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
@ -3628,7 +3628,7 @@ SelectionDAGBuilder::visitLog10(const CallInst &I) {
if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 && if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) { LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
SDValue Op = getValue(I.getArgOperand(0)); SDValue Op = getValue(I.getArgOperand(0));
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
// Scale the exponent by log10(2) [0.30102999f]. // Scale the exponent by log10(2) [0.30102999f].
SDValue Exp = GetExponent(DAG, Op1, TLI, dl); SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
@ -3756,11 +3756,11 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f7f5e7e)); getF32Constant(DAG, 0x3f7f5e7e));
SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5); SDValue t6 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t5);
SDValue TwoToFractionalPartOfX = SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX); DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
result = DAG.getNode(ISD::BIT_CONVERT, dl, result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX); MVT::f32, TwoToFractionalPartOfX);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) { } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12: // For floating-point precision of 12:
@ -3781,11 +3781,11 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3f7ff8fd)); getF32Constant(DAG, 0x3f7ff8fd));
SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7); SDValue t8 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t7);
SDValue TwoToFractionalPartOfX = SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX); DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
result = DAG.getNode(ISD::BIT_CONVERT, dl, result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX); MVT::f32, TwoToFractionalPartOfX);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18: // For floating-point precision of 18:
@ -3817,11 +3817,11 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X); SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12, SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
getF32Constant(DAG, 0x3f800000)); getF32Constant(DAG, 0x3f800000));
SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13); SDValue t14 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t13);
SDValue TwoToFractionalPartOfX = SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX); DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
result = DAG.getNode(ISD::BIT_CONVERT, dl, result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX); MVT::f32, TwoToFractionalPartOfX);
} }
} else { } else {
@ -3889,11 +3889,11 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X); SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4, SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
getF32Constant(DAG, 0x3f7f5e7e)); getF32Constant(DAG, 0x3f7f5e7e));
SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5); SDValue t6 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t5);
SDValue TwoToFractionalPartOfX = SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX); DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
result = DAG.getNode(ISD::BIT_CONVERT, dl, result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX); MVT::f32, TwoToFractionalPartOfX);
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) { } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
// For floating-point precision of 12: // For floating-point precision of 12:
@ -3914,11 +3914,11 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X); SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6, SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
getF32Constant(DAG, 0x3f7ff8fd)); getF32Constant(DAG, 0x3f7ff8fd));
SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7); SDValue t8 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t7);
SDValue TwoToFractionalPartOfX = SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX); DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
result = DAG.getNode(ISD::BIT_CONVERT, dl, result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX); MVT::f32, TwoToFractionalPartOfX);
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18 } else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
// For floating-point precision of 18: // For floating-point precision of 18:
@ -3950,11 +3950,11 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X); SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12, SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
getF32Constant(DAG, 0x3f800000)); getF32Constant(DAG, 0x3f800000));
SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13); SDValue t14 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t13);
SDValue TwoToFractionalPartOfX = SDValue TwoToFractionalPartOfX =
DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX); DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
result = DAG.getNode(ISD::BIT_CONVERT, dl, result = DAG.getNode(ISD::BITCAST, dl,
MVT::f32, TwoToFractionalPartOfX); MVT::f32, TwoToFractionalPartOfX);
} }
} else { } else {
@ -4072,11 +4072,11 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
if (VMI != FuncInfo.ValueMap.end()) if (VMI != FuncInfo.ValueMap.end())
Reg = VMI->second; Reg = VMI->second;
} }
if (!Reg && N.getNode()) { if (!Reg && N.getNode()) {
// Check if frame index is available. // Check if frame index is available.
if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode())) if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
if (FrameIndexSDNode *FINode = if (FrameIndexSDNode *FINode =
dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) { dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) {
Reg = TRI->getFrameRegister(MF); Reg = TRI->getFrameRegister(MF);
Offset = FINode->getIndex(); Offset = FINode->getIndex();
@ -4476,7 +4476,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
ShOps[1] = DAG.getConstant(0, MVT::i32); ShOps[1] = DAG.getConstant(0, MVT::i32);
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2); ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
EVT DestVT = TLI.getValueType(I.getType()); EVT DestVT = TLI.getValueType(I.getType());
ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, DestVT, ShAmt); ShAmt = DAG.getNode(ISD::BITCAST, dl, DestVT, ShAmt);
Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT, Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
DAG.getConstant(NewIntrinsic, MVT::i32), DAG.getConstant(NewIntrinsic, MVT::i32),
getValue(I.getArgOperand(0)), ShAmt); getValue(I.getArgOperand(0)), ShAmt);
@ -4713,7 +4713,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Ops[3] = getValue(I.getArgOperand(2)); Ops[3] = getValue(I.getArgOperand(2));
DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, dl, DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, dl,
DAG.getVTList(MVT::Other), DAG.getVTList(MVT::Other),
&Ops[0], 4, &Ops[0], 4,
EVT::getIntegerVT(*Context, 8), EVT::getIntegerVT(*Context, 8),
MachinePointerInfo(I.getArgOperand(0)), MachinePointerInfo(I.getArgOperand(0)),
0, /* align */ 0, /* align */
@ -5119,7 +5119,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
!MMI.callsExternalVAFunctionWithFloatingPointArguments()) { !MMI.callsExternalVAFunctionWithFloatingPointArguments()) {
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) { for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
const Type* T = I.getArgOperand(i)->getType(); const Type* T = I.getArgOperand(i)->getType();
for (po_iterator<const Type*> i = po_begin(T), e = po_end(T); for (po_iterator<const Type*> i = po_begin(T), e = po_end(T);
i != e; ++i) { i != e; ++i) {
if (!i->isFloatingPointTy()) continue; if (!i->isFloatingPointTy()) continue;
MMI.setCallsExternalVAFunctionWithFloatingPointArguments(true); MMI.setCallsExternalVAFunctionWithFloatingPointArguments(true);
@ -5419,7 +5419,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
// vector types). // vector types).
EVT RegVT = *PhysReg.second->vt_begin(); EVT RegVT = *PhysReg.second->vt_begin();
if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) { if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
RegVT, OpInfo.CallOperand); RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT; OpInfo.ConstraintVT = RegVT;
} else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) { } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
@ -5429,7 +5429,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
// machine. // machine.
RegVT = EVT::getIntegerVT(Context, RegVT = EVT::getIntegerVT(Context,
OpInfo.ConstraintVT.getSizeInBits()); OpInfo.ConstraintVT.getSizeInBits());
OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
RegVT, OpInfo.CallOperand); RegVT, OpInfo.CallOperand);
OpInfo.ConstraintVT = RegVT; OpInfo.ConstraintVT = RegVT;
} }
@ -5945,7 +5945,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// not have the same VT as was expected. Convert it to the right type // not have the same VT as was expected. Convert it to the right type
// with bit_convert. // with bit_convert.
if (ResultType != Val.getValueType() && Val.getValueType().isVector()) { if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), Val = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
ResultType, Val); ResultType, Val);
} else if (ResultType != Val.getValueType() && } else if (ResultType != Val.getValueType() &&

File diff suppressed because it is too large Load Diff

View File

@ -1519,7 +1519,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
break; break;
} }
case CCValAssign::BCvt: { case CCValAssign::BCvt: {
unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BIT_CONVERT, Arg, unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
/*TODO: Kill=*/false); /*TODO: Kill=*/false);
assert(BC != 0 && "Failed to emit a bitcast!"); assert(BC != 0 && "Failed to emit a bitcast!");
Arg = BC; Arg = BC;

View File

@ -238,7 +238,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLibcallName(RTLIB::SRA_I128, 0); setLibcallName(RTLIB::SRA_I128, 0);
if (Subtarget->isAAPCS_ABI()) { if (Subtarget->isAAPCS_ABI()) {
// Double-precision floating-point arithmetic helper functions // Double-precision floating-point arithmetic helper functions
// RTABI chapter 4.1.2, Table 2 // RTABI chapter 4.1.2, Table 2
setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd"); setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv"); setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
@ -338,7 +338,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f"); setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d"); setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d");
setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS); setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS); setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);
// Integer to floating-point conversions. // Integer to floating-point conversions.
// RTABI chapter 4.1.2, Table 8 // RTABI chapter 4.1.2, Table 8
@ -387,7 +387,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS); setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS); setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS); setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS); setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
} }
if (Subtarget->isThumb1Only()) if (Subtarget->isThumb1Only())
@ -609,7 +609,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) { if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
// Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
// iff target supports vfp2. // iff target supports vfp2.
setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom); setOperationAction(ISD::BITCAST, MVT::i64, Custom);
setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom); setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
} }
@ -1061,7 +1061,7 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
default: llvm_unreachable("Unknown loc info!"); default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break; case CCValAssign::Full: break;
case CCValAssign::BCvt: case CCValAssign::BCvt:
Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val); Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
break; break;
} }
@ -1209,7 +1209,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
break; break;
case CCValAssign::BCvt: case CCValAssign::BCvt:
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg); Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
break; break;
} }
@ -1666,7 +1666,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
default: llvm_unreachable("Unknown loc info!"); default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break; case CCValAssign::Full: break;
case CCValAssign::BCvt: case CCValAssign::BCvt:
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg); Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
break; break;
} }
@ -2223,7 +2223,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
default: llvm_unreachable("Unknown loc info!"); default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: break; case CCValAssign::Full: break;
case CCValAssign::BCvt: case CCValAssign::BCvt:
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue); ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
break; break;
case CCValAssign::SExt: case CCValAssign::SExt:
ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue, ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
@ -2689,7 +2689,7 @@ static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
break; break;
} }
Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0)); Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
} }
static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) { static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
@ -2708,7 +2708,7 @@ static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
break; break;
} }
Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0)); Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
return DAG.getNode(Opc, dl, VT, Op); return DAG.getNode(Opc, dl, VT, Op);
} }
@ -2765,12 +2765,12 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
return FrameAddr; return FrameAddr;
} }
/// ExpandBIT_CONVERT - If the target supports VFP, this function is called to /// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to /// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64 /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support /// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that. /// vectors), since the legalizer won't know what to do with that.
static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) { static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo(); const TargetLowering &TLI = DAG.getTargetLoweringInfo();
DebugLoc dl = N->getDebugLoc(); DebugLoc dl = N->getDebugLoc();
SDValue Op = N->getOperand(0); SDValue Op = N->getOperand(0);
@ -2780,7 +2780,7 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
EVT SrcVT = Op.getValueType(); EVT SrcVT = Op.getValueType();
EVT DstVT = N->getValueType(0); EVT DstVT = N->getValueType(0);
assert((SrcVT == MVT::i64 || DstVT == MVT::i64) && assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
"ExpandBIT_CONVERT called for non-i64 type"); "ExpandBITCAST called for non-i64 type");
// Turn i64->f64 into VMOVDRR. // Turn i64->f64 into VMOVDRR.
if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) { if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
@ -2788,7 +2788,7 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
DAG.getConstant(0, MVT::i32)); DAG.getConstant(0, MVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op, SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
DAG.getConstant(1, MVT::i32)); DAG.getConstant(1, MVT::i32));
return DAG.getNode(ISD::BIT_CONVERT, dl, DstVT, return DAG.getNode(ISD::BITCAST, dl, DstVT,
DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi)); DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
} }
@ -2815,7 +2815,7 @@ static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32); SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32; EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal); SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
} }
/// LowerShiftRightParts - Lower SRA_PARTS, which returns two /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
@ -3068,13 +3068,13 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
AndOp = Op1; AndOp = Op1;
// Ignore bitconvert. // Ignore bitconvert.
if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT) if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
AndOp = AndOp.getOperand(0); AndOp = AndOp.getOperand(0);
if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) { if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
Opc = ARMISD::VTST; Opc = ARMISD::VTST;
Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0)); Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1)); Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
Invert = !Invert; Invert = !Invert;
} }
} }
@ -3095,7 +3095,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
Opc = ARMISD::VCLTZ; Opc = ARMISD::VCLTZ;
SingleOp = Op1; SingleOp = Op1;
} }
SDValue Result; SDValue Result;
if (SingleOp.getNode()) { if (SingleOp.getNode()) {
switch (Opc) { switch (Opc) {
@ -3499,7 +3499,7 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
VMOVModImm); VMOVModImm);
if (Val.getNode()) { if (Val.getNode()) {
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val); SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
} }
// Try an immediate VMVN. // Try an immediate VMVN.
@ -3507,11 +3507,11 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
((1LL << SplatBitSize) - 1)); ((1LL << SplatBitSize) - 1));
Val = isNEONModifiedImm(NegatedImm, Val = isNEONModifiedImm(NegatedImm,
SplatUndef.getZExtValue(), SplatBitSize, SplatUndef.getZExtValue(), SplatBitSize,
DAG, VmovVT, VT.is128BitVector(), DAG, VmovVT, VT.is128BitVector(),
VMVNModImm); VMVNModImm);
if (Val.getNode()) { if (Val.getNode()) {
SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val); SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov); return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
} }
} }
} }
@ -3553,13 +3553,13 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
if (VT.getVectorElementType().isFloatingPoint()) { if (VT.getVectorElementType().isFloatingPoint()) {
SmallVector<SDValue, 8> Ops; SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i) for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
Op.getOperand(i))); Op.getOperand(i)));
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts); EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts); SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts);
Val = LowerBUILD_VECTOR(Val, DAG, ST); Val = LowerBUILD_VECTOR(Val, DAG, ST);
if (Val.getNode()) if (Val.getNode())
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); return DAG.getNode(ISD::BITCAST, dl, VT, Val);
} }
SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl); SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
if (Val.getNode()) if (Val.getNode())
@ -3582,9 +3582,9 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
SmallVector<SDValue, 8> Ops; SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i) for (unsigned i = 0; i < NumElts; ++i)
Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, EltVT, Op.getOperand(i))); Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); return DAG.getNode(ISD::BITCAST, dl, VT, Val);
} }
return SDValue(); return SDValue();
@ -3805,8 +3805,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
// registers are defined to use, and since i64 is not legal. // registers are defined to use, and since i64 is not legal.
EVT EltVT = EVT::getFloatingPointVT(EltSize); EVT EltVT = EVT::getFloatingPointVT(EltSize);
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts); EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V1); V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
V2 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V2); V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
SmallVector<SDValue, 8> Ops; SmallVector<SDValue, 8> Ops;
for (unsigned i = 0; i < NumElts; ++i) { for (unsigned i = 0; i < NumElts; ++i) {
if (ShuffleMask[i] < 0) if (ShuffleMask[i] < 0)
@ -3818,7 +3818,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
MVT::i32))); MVT::i32)));
} }
SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts); SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val); return DAG.getNode(ISD::BITCAST, dl, VT, Val);
} }
return SDValue(); return SDValue();
@ -3851,13 +3851,13 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
SDValue Op1 = Op.getOperand(1); SDValue Op1 = Op.getOperand(1);
if (Op0.getOpcode() != ISD::UNDEF) if (Op0.getOpcode() != ISD::UNDEF)
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0), DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
DAG.getIntPtrConstant(0)); DAG.getIntPtrConstant(0));
if (Op1.getOpcode() != ISD::UNDEF) if (Op1.getOpcode() != ISD::UNDEF)
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val, Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1), DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
DAG.getIntPtrConstant(1)); DAG.getIntPtrConstant(1));
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
} }
/// SkipExtension - For a node that is either a SIGN_EXTEND, ZERO_EXTEND, or /// SkipExtension - For a node that is either a SIGN_EXTEND, ZERO_EXTEND, or
@ -3933,7 +3933,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG); case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG, case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
Subtarget); Subtarget);
case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(Op.getNode(), DAG); case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG);
case ISD::SHL: case ISD::SHL:
case ISD::SRL: case ISD::SRL:
case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget); case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
@ -3962,8 +3962,8 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
default: default:
llvm_unreachable("Don't know how to custom expand this!"); llvm_unreachable("Don't know how to custom expand this!");
break; break;
case ISD::BIT_CONVERT: case ISD::BITCAST:
Res = ExpandBIT_CONVERT(N, DAG); Res = ExpandBITCAST(N, DAG);
break; break;
case ISD::SRL: case ISD::SRL:
case ISD::SRA: case ISD::SRA:
@ -4497,7 +4497,7 @@ static SDValue PerformANDCombine(SDNode *N,
DebugLoc dl = N->getDebugLoc(); DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0); EVT VT = N->getValueType(0);
SelectionDAG &DAG = DCI.DAG; SelectionDAG &DAG = DCI.DAG;
APInt SplatBits, SplatUndef; APInt SplatBits, SplatUndef;
unsigned SplatBitSize; unsigned SplatBitSize;
bool HasAnyUndefs; bool HasAnyUndefs;
@ -4507,17 +4507,17 @@ static SDValue PerformANDCombine(SDNode *N,
EVT VbicVT; EVT VbicVT;
SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(), SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
SplatUndef.getZExtValue(), SplatBitSize, SplatUndef.getZExtValue(), SplatBitSize,
DAG, VbicVT, VT.is128BitVector(), DAG, VbicVT, VT.is128BitVector(),
OtherModImm); OtherModImm);
if (Val.getNode()) { if (Val.getNode()) {
SDValue Input = SDValue Input =
DAG.getNode(ISD::BIT_CONVERT, dl, VbicVT, N->getOperand(0)); DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val); SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vbic); return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
} }
} }
} }
return SDValue(); return SDValue();
} }
@ -4530,7 +4530,7 @@ static SDValue PerformORCombine(SDNode *N,
DebugLoc dl = N->getDebugLoc(); DebugLoc dl = N->getDebugLoc();
EVT VT = N->getValueType(0); EVT VT = N->getValueType(0);
SelectionDAG &DAG = DCI.DAG; SelectionDAG &DAG = DCI.DAG;
APInt SplatBits, SplatUndef; APInt SplatBits, SplatUndef;
unsigned SplatBitSize; unsigned SplatBitSize;
bool HasAnyUndefs; bool HasAnyUndefs;
@ -4544,9 +4544,9 @@ static SDValue PerformORCombine(SDNode *N,
OtherModImm); OtherModImm);
if (Val.getNode()) { if (Val.getNode()) {
SDValue Input = SDValue Input =
DAG.getNode(ISD::BIT_CONVERT, dl, VorrVT, N->getOperand(0)); DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val); SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vorr); return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
} }
} }
} }
@ -4640,7 +4640,7 @@ static SDValue PerformORCombine(SDNode *N,
DCI.CombineTo(N, Res, false); DCI.CombineTo(N, Res, false);
} }
} }
return SDValue(); return SDValue();
} }
@ -4661,14 +4661,14 @@ static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
// N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X) // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
SDValue Op0 = N->getOperand(0); SDValue Op0 = N->getOperand(0);
SDValue Op1 = N->getOperand(1); SDValue Op1 = N->getOperand(1);
if (Op0.getOpcode() == ISD::BIT_CONVERT) if (Op0.getOpcode() == ISD::BITCAST)
Op0 = Op0.getOperand(0); Op0 = Op0.getOperand(0);
if (Op1.getOpcode() == ISD::BIT_CONVERT) if (Op1.getOpcode() == ISD::BITCAST)
Op1 = Op1.getOperand(0); Op1 = Op1.getOperand(0);
if (Op0.getOpcode() == ARMISD::VMOVRRD && if (Op0.getOpcode() == ARMISD::VMOVRRD &&
Op0.getNode() == Op1.getNode() && Op0.getNode() == Op1.getNode() &&
Op0.getResNo() == 0 && Op1.getResNo() == 1) Op0.getResNo() == 0 && Op1.getResNo() == 1)
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
N->getValueType(0), Op0.getOperand(0)); N->getValueType(0), Op0.getOperand(0));
return SDValue(); return SDValue();
} }
@ -4748,7 +4748,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
EVT VT = N->getValueType(0); EVT VT = N->getValueType(0);
// Ignore bit_converts. // Ignore bit_converts.
while (Op.getOpcode() == ISD::BIT_CONVERT) while (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0); Op = Op.getOperand(0);
if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM) if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
return SDValue(); return SDValue();
@ -4763,7 +4763,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
if (EltSize > VT.getVectorElementType().getSizeInBits()) if (EltSize > VT.getVectorElementType().getSizeInBits())
return SDValue(); return SDValue();
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op); return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
} }
/// getVShiftImm - Check if this is a valid build_vector for the immediate /// getVShiftImm - Check if this is a valid build_vector for the immediate
@ -4771,7 +4771,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
/// build_vector must have the same constant integer value. /// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) { static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
// Ignore bit_converts. // Ignore bit_converts.
while (Op.getOpcode() == ISD::BIT_CONVERT) while (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0); Op = Op.getOperand(0);
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode()); BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
APInt SplatBits, SplatUndef; APInt SplatBits, SplatUndef;
@ -5935,7 +5935,7 @@ bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
return false; return false;
} }
/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls. /// specified in the intrinsic calls.
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,

View File

@ -125,7 +125,7 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
setOperationAction(ISD::SETCC, MVT::f32, Promote); setOperationAction(ISD::SETCC, MVT::f32, Promote);
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Promote); setOperationAction(ISD::BITCAST, MVT::f32, Promote);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand); setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
@ -616,7 +616,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
"Unhandled SINT_TO_FP type in custom expander!"); "Unhandled SINT_TO_FP type in custom expander!");
SDValue LD; SDValue LD;
bool isDouble = Op.getValueType() == MVT::f64; bool isDouble = Op.getValueType() == MVT::f64;
LD = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op.getOperand(0)); LD = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
SDValue FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, dl, SDValue FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, dl,
isDouble?MVT::f64:MVT::f32, LD); isDouble?MVT::f64:MVT::f32, LD);
return FP; return FP;
@ -630,7 +630,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
src = DAG.getNode(AlphaISD::CVTTQ_, dl, MVT::f64, src); src = DAG.getNode(AlphaISD::CVTTQ_, dl, MVT::f64, src);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, src); return DAG.getNode(ISD::BITCAST, dl, MVT::i64, src);
} }
case ISD::ConstantPool: { case ISD::ConstantPool: {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op); ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
@ -648,11 +648,11 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
case ISD::GlobalAddress: { case ISD::GlobalAddress: {
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op); GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GSDN->getGlobal(); const GlobalValue *GV = GSDN->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64, SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64,
GSDN->getOffset()); GSDN->getOffset());
// FIXME there isn't really any debug info here // FIXME there isn't really any debug info here
// if (!GV->hasWeakLinkage() && !GV->isDeclaration() // if (!GV->hasWeakLinkage() && !GV->isDeclaration()
// && !GV->hasLinkOnceLinkage()) { // && !GV->hasLinkOnceLinkage()) {
if (GV->hasLocalLinkage()) { if (GV->hasLocalLinkage()) {
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, GA, SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, GA,
@ -727,7 +727,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
SDValue Val = DAG.getLoad(getPointerTy(), dl, Chain, SrcP, SDValue Val = DAG.getLoad(getPointerTy(), dl, Chain, SrcP,
MachinePointerInfo(SrcS), MachinePointerInfo(SrcS),
false, false, 0); false, false, 0);
SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP, SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP,
MachinePointerInfo(DestS), MachinePointerInfo(DestS),
false, false, 0); false, false, 0);
SDValue NP = DAG.getNode(ISD::ADD, dl, MVT::i64, SrcP, SDValue NP = DAG.getNode(ISD::ADD, dl, MVT::i64, SrcP,
@ -779,7 +779,7 @@ void AlphaTargetLowering::ReplaceNodeResults(SDNode *N,
SDValue Chain, DataPtr; SDValue Chain, DataPtr;
LowerVAARG(N, Chain, DataPtr, DAG); LowerVAARG(N, Chain, DataPtr, DAG);
SDValue Res = DAG.getLoad(N->getValueType(0), dl, Chain, DataPtr, SDValue Res = DAG.getLoad(N->getValueType(0), dl, Chain, DataPtr,
MachinePointerInfo(), MachinePointerInfo(),
false, false, 0); false, false, 0);
Results.push_back(Res); Results.push_back(Res);

View File

@ -213,7 +213,7 @@ namespace {
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
SDValue CGPoolOffset = SDValue CGPoolOffset =
SPU::LowerConstantPool(CPIdx, *CurDAG, TM); SPU::LowerConstantPool(CPIdx, *CurDAG, TM);
HandleSDNode Dummy(CurDAG->getLoad(vecVT, dl, HandleSDNode Dummy(CurDAG->getLoad(vecVT, dl,
CurDAG->getEntryNode(), CGPoolOffset, CurDAG->getEntryNode(), CGPoolOffset,
MachinePointerInfo::getConstantPool(), MachinePointerInfo::getConstantPool(),
@ -308,9 +308,9 @@ namespace {
assert(II && "No InstrInfo?"); assert(II && "No InstrInfo?");
return new SPUHazardRecognizer(*II); return new SPUHazardRecognizer(*II);
} }
private: private:
SDValue getRC( MVT ); SDValue getRC( MVT );
// Include the pieces autogenerated from the target description. // Include the pieces autogenerated from the target description.
#include "SPUGenDAGISel.inc" #include "SPUGenDAGISel.inc"
@ -512,8 +512,8 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
Base = CurDAG->getTargetConstant(0, N.getValueType()); Base = CurDAG->getTargetConstant(0, N.getValueType());
Index = N; Index = N;
return true; return true;
} else if (Opc == ISD::Register } else if (Opc == ISD::Register
||Opc == ISD::CopyFromReg ||Opc == ISD::CopyFromReg
||Opc == ISD::UNDEF ||Opc == ISD::UNDEF
||Opc == ISD::Constant) { ||Opc == ISD::Constant) {
unsigned OpOpc = Op->getOpcode(); unsigned OpOpc = Op->getOpcode();
@ -574,7 +574,7 @@ SPUDAGToDAGISel::SelectXFormAddr(SDNode *Op, SDValue N, SDValue &Base,
} }
/*! /*!
Utility function to use with COPY_TO_REGCLASS instructions. Returns a SDValue Utility function to use with COPY_TO_REGCLASS instructions. Returns a SDValue
to be used as the last parameter of a to be used as the last parameter of a
CurDAG->getMachineNode(COPY_TO_REGCLASS,..., ) function call CurDAG->getMachineNode(COPY_TO_REGCLASS,..., ) function call
\arg VT the value type for which we want a register class \arg VT the value type for which we want a register class
@ -582,19 +582,19 @@ CurDAG->getMachineNode(COPY_TO_REGCLASS,..., ) function call
SDValue SPUDAGToDAGISel::getRC( MVT VT ) { SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
switch( VT.SimpleTy ) { switch( VT.SimpleTy ) {
case MVT::i8: case MVT::i8:
return CurDAG->getTargetConstant(SPU::R8CRegClass.getID(), MVT::i32); return CurDAG->getTargetConstant(SPU::R8CRegClass.getID(), MVT::i32);
break; break;
case MVT::i16: case MVT::i16:
return CurDAG->getTargetConstant(SPU::R16CRegClass.getID(), MVT::i32); return CurDAG->getTargetConstant(SPU::R16CRegClass.getID(), MVT::i32);
break; break;
case MVT::i32: case MVT::i32:
return CurDAG->getTargetConstant(SPU::R32CRegClass.getID(), MVT::i32); return CurDAG->getTargetConstant(SPU::R32CRegClass.getID(), MVT::i32);
break; break;
case MVT::f32: case MVT::f32:
return CurDAG->getTargetConstant(SPU::R32FPRegClass.getID(), MVT::i32); return CurDAG->getTargetConstant(SPU::R32FPRegClass.getID(), MVT::i32);
break; break;
case MVT::i64: case MVT::i64:
return CurDAG->getTargetConstant(SPU::R64CRegClass.getID(), MVT::i32); return CurDAG->getTargetConstant(SPU::R64CRegClass.getID(), MVT::i32);
break; break;
case MVT::v16i8: case MVT::v16i8:
case MVT::v8i16: case MVT::v8i16:
@ -602,7 +602,7 @@ SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
case MVT::v4f32: case MVT::v4f32:
case MVT::v2i64: case MVT::v2i64:
case MVT::v2f64: case MVT::v2f64:
return CurDAG->getTargetConstant(SPU::VECREGRegClass.getID(), MVT::i32); return CurDAG->getTargetConstant(SPU::VECREGRegClass.getID(), MVT::i32);
break; break;
default: default:
assert( false && "add a new case here" ); assert( false && "add a new case here" );
@ -654,7 +654,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
EVT Op0VT = Op0.getValueType(); EVT Op0VT = Op0.getValueType();
EVT Op0VecVT = EVT::getVectorVT(*CurDAG->getContext(), EVT Op0VecVT = EVT::getVectorVT(*CurDAG->getContext(),
Op0VT, (128 / Op0VT.getSizeInBits())); Op0VT, (128 / Op0VT.getSizeInBits()));
EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(), EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(),
OpVT, (128 / OpVT.getSizeInBits())); OpVT, (128 / OpVT.getSizeInBits()));
SDValue shufMask; SDValue shufMask;
@ -688,19 +688,19 @@ SPUDAGToDAGISel::Select(SDNode *N) {
} }
SDNode *shufMaskLoad = emitBuildVector(shufMask.getNode()); SDNode *shufMaskLoad = emitBuildVector(shufMask.getNode());
HandleSDNode PromoteScalar(CurDAG->getNode(SPUISD::PREFSLOT2VEC, dl, HandleSDNode PromoteScalar(CurDAG->getNode(SPUISD::PREFSLOT2VEC, dl,
Op0VecVT, Op0)); Op0VecVT, Op0));
SDValue PromScalar; SDValue PromScalar;
if (SDNode *N = SelectCode(PromoteScalar.getValue().getNode())) if (SDNode *N = SelectCode(PromoteScalar.getValue().getNode()))
PromScalar = SDValue(N, 0); PromScalar = SDValue(N, 0);
else else
PromScalar = PromoteScalar.getValue(); PromScalar = PromoteScalar.getValue();
SDValue zextShuffle = SDValue zextShuffle =
CurDAG->getNode(SPUISD::SHUFB, dl, OpVecVT, CurDAG->getNode(SPUISD::SHUFB, dl, OpVecVT,
PromScalar, PromScalar, PromScalar, PromScalar,
SDValue(shufMaskLoad, 0)); SDValue(shufMaskLoad, 0));
HandleSDNode Dummy2(zextShuffle); HandleSDNode Dummy2(zextShuffle);
@ -710,7 +710,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
zextShuffle = Dummy2.getValue(); zextShuffle = Dummy2.getValue();
HandleSDNode Dummy(CurDAG->getNode(SPUISD::VEC2PREFSLOT, dl, OpVT, HandleSDNode Dummy(CurDAG->getNode(SPUISD::VEC2PREFSLOT, dl, OpVT,
zextShuffle)); zextShuffle));
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode()); CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
SelectCode(Dummy.getValue().getNode()); SelectCode(Dummy.getValue().getNode());
return Dummy.getValue().getNode(); return Dummy.getValue().getNode();
@ -721,7 +721,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
HandleSDNode Dummy(CurDAG->getNode(SPUISD::ADD64_MARKER, dl, OpVT, HandleSDNode Dummy(CurDAG->getNode(SPUISD::ADD64_MARKER, dl, OpVT,
N->getOperand(0), N->getOperand(1), N->getOperand(0), N->getOperand(1),
SDValue(CGLoad, 0))); SDValue(CGLoad, 0)));
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode()); CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
if (SDNode *N = SelectCode(Dummy.getValue().getNode())) if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
return N; return N;
@ -733,7 +733,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
HandleSDNode Dummy(CurDAG->getNode(SPUISD::SUB64_MARKER, dl, OpVT, HandleSDNode Dummy(CurDAG->getNode(SPUISD::SUB64_MARKER, dl, OpVT,
N->getOperand(0), N->getOperand(1), N->getOperand(0), N->getOperand(1),
SDValue(CGLoad, 0))); SDValue(CGLoad, 0)));
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode()); CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
if (SDNode *N = SelectCode(Dummy.getValue().getNode())) if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
return N; return N;
@ -847,12 +847,12 @@ SPUDAGToDAGISel::Select(SDNode *N) {
SDValue Arg = N->getOperand(0); SDValue Arg = N->getOperand(0);
SDValue Chain = N->getOperand(1); SDValue Chain = N->getOperand(1);
SDNode *Result; SDNode *Result;
Result = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, VT, Result = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, VT,
MVT::Other, Arg, MVT::Other, Arg,
getRC( VT.getSimpleVT()), Chain); getRC( VT.getSimpleVT()), Chain);
return Result; return Result;
} else if (Opc == SPUISD::IndirectAddr) { } else if (Opc == SPUISD::IndirectAddr) {
// Look at the operands: SelectCode() will catch the cases that aren't // Look at the operands: SelectCode() will catch the cases that aren't
// specifically handled here. // specifically handled here.
@ -878,10 +878,10 @@ SPUDAGToDAGISel::Select(SDNode *N) {
NewOpc = SPU::AIr32; NewOpc = SPU::AIr32;
Ops[1] = Op1; Ops[1] = Op1;
} else { } else {
Ops[1] = SDValue(CurDAG->getMachineNode(SPU::ILr32, dl, Ops[1] = SDValue(CurDAG->getMachineNode(SPU::ILr32, dl,
N->getValueType(0), N->getValueType(0),
Op1), Op1),
0); 0);
} }
} }
Ops[0] = Op0; Ops[0] = Op0;
@ -913,7 +913,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
SDNode * SDNode *
SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) { SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) {
SDValue Op0 = N->getOperand(0); SDValue Op0 = N->getOperand(0);
EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(), EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
OpVT, (128 / OpVT.getSizeInBits())); OpVT, (128 / OpVT.getSizeInBits()));
SDValue ShiftAmt = N->getOperand(1); SDValue ShiftAmt = N->getOperand(1);
EVT ShiftAmtVT = ShiftAmt.getValueType(); EVT ShiftAmtVT = ShiftAmt.getValueType();
@ -966,7 +966,7 @@ SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) {
SDValue(Shift, 0), SDValue(Bits, 0)); SDValue(Shift, 0), SDValue(Bits, 0));
} }
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
OpVT, SDValue(Shift, 0), getRC(MVT::i64)); OpVT, SDValue(Shift, 0), getRC(MVT::i64));
} }
@ -1035,7 +1035,7 @@ SPUDAGToDAGISel::SelectSRLi64(SDNode *N, EVT OpVT) {
SDValue(Shift, 0), SDValue(Bits, 0)); SDValue(Shift, 0), SDValue(Bits, 0));
} }
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
OpVT, SDValue(Shift, 0), getRC(MVT::i64)); OpVT, SDValue(Shift, 0), getRC(MVT::i64));
} }
@ -1050,14 +1050,14 @@ SPUDAGToDAGISel::SelectSRLi64(SDNode *N, EVT OpVT) {
SDNode * SDNode *
SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) { SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
// Promote Op0 to vector // Promote Op0 to vector
EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(), EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
OpVT, (128 / OpVT.getSizeInBits())); OpVT, (128 / OpVT.getSizeInBits()));
SDValue ShiftAmt = N->getOperand(1); SDValue ShiftAmt = N->getOperand(1);
EVT ShiftAmtVT = ShiftAmt.getValueType(); EVT ShiftAmtVT = ShiftAmt.getValueType();
DebugLoc dl = N->getDebugLoc(); DebugLoc dl = N->getDebugLoc();
SDNode *VecOp0 = SDNode *VecOp0 =
CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
VecVT, N->getOperand(0), getRC(MVT::v2i64)); VecVT, N->getOperand(0), getRC(MVT::v2i64));
SDValue SignRotAmt = CurDAG->getTargetConstant(31, ShiftAmtVT); SDValue SignRotAmt = CurDAG->getTargetConstant(31, ShiftAmtVT);
@ -1065,7 +1065,7 @@ SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
CurDAG->getMachineNode(SPU::ROTMAIv2i64_i32, dl, MVT::v2i64, CurDAG->getMachineNode(SPU::ROTMAIv2i64_i32, dl, MVT::v2i64,
SDValue(VecOp0, 0), SignRotAmt); SDValue(VecOp0, 0), SignRotAmt);
SDNode *UpperHalfSign = SDNode *UpperHalfSign =
CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
MVT::i32, SDValue(SignRot, 0), getRC(MVT::i32)); MVT::i32, SDValue(SignRot, 0), getRC(MVT::i32));
SDNode *UpperHalfSignMask = SDNode *UpperHalfSignMask =
@ -1113,7 +1113,7 @@ SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
SDValue(Shift, 0), SDValue(NegShift, 0)); SDValue(Shift, 0), SDValue(NegShift, 0));
} }
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
OpVT, SDValue(Shift, 0), getRC(MVT::i64)); OpVT, SDValue(Shift, 0), getRC(MVT::i64));
} }
@ -1135,7 +1135,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
// Here's where it gets interesting, because we have to parse out the // Here's where it gets interesting, because we have to parse out the
// subtree handed back in i64vec: // subtree handed back in i64vec:
if (i64vec.getOpcode() == ISD::BIT_CONVERT) { if (i64vec.getOpcode() == ISD::BITCAST) {
// The degenerate case where the upper and lower bits in the splat are // The degenerate case where the upper and lower bits in the splat are
// identical: // identical:
SDValue Op0 = i64vec.getOperand(0); SDValue Op0 = i64vec.getOperand(0);
@ -1149,7 +1149,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
SDValue rhs = i64vec.getOperand(1); SDValue rhs = i64vec.getOperand(1);
SDValue shufmask = i64vec.getOperand(2); SDValue shufmask = i64vec.getOperand(2);
if (lhs.getOpcode() == ISD::BIT_CONVERT) { if (lhs.getOpcode() == ISD::BITCAST) {
ReplaceUses(lhs, lhs.getOperand(0)); ReplaceUses(lhs, lhs.getOperand(0));
lhs = lhs.getOperand(0); lhs = lhs.getOperand(0);
} }
@ -1158,7 +1158,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
? lhs.getNode() ? lhs.getNode()
: emitBuildVector(lhs.getNode())); : emitBuildVector(lhs.getNode()));
if (rhs.getOpcode() == ISD::BIT_CONVERT) { if (rhs.getOpcode() == ISD::BITCAST) {
ReplaceUses(rhs, rhs.getOperand(0)); ReplaceUses(rhs, rhs.getOperand(0));
rhs = rhs.getOperand(0); rhs = rhs.getOperand(0);
} }
@ -1167,7 +1167,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
? rhs.getNode() ? rhs.getNode()
: emitBuildVector(rhs.getNode())); : emitBuildVector(rhs.getNode()));
if (shufmask.getOpcode() == ISD::BIT_CONVERT) { if (shufmask.getOpcode() == ISD::BITCAST) {
ReplaceUses(shufmask, shufmask.getOperand(0)); ReplaceUses(shufmask, shufmask.getOperand(0));
shufmask = shufmask.getOperand(0); shufmask = shufmask.getOperand(0);
} }
@ -1183,8 +1183,8 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
HandleSDNode Dummy(shufNode); HandleSDNode Dummy(shufNode);
SDNode *SN = SelectCode(Dummy.getValue().getNode()); SDNode *SN = SelectCode(Dummy.getValue().getNode());
if (SN == 0) SN = Dummy.getValue().getNode(); if (SN == 0) SN = Dummy.getValue().getNode();
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
OpVT, SDValue(SN, 0), getRC(MVT::i64)); OpVT, SDValue(SN, 0), getRC(MVT::i64));
} else if (i64vec.getOpcode() == ISD::BUILD_VECTOR) { } else if (i64vec.getOpcode() == ISD::BUILD_VECTOR) {
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, OpVT, return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, OpVT,

View File

@ -45,9 +45,9 @@ namespace {
// Byte offset of the preferred slot (counted from the MSB) // Byte offset of the preferred slot (counted from the MSB)
int prefslotOffset(EVT VT) { int prefslotOffset(EVT VT) {
int retval=0; int retval=0;
if (VT==MVT::i1) retval=3; if (VT==MVT::i1) retval=3;
if (VT==MVT::i8) retval=3; if (VT==MVT::i8) retval=3;
if (VT==MVT::i16) retval=2; if (VT==MVT::i16) retval=2;
return retval; return retval;
} }
@ -348,10 +348,10 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal); setOperationAction(ISD::BITCAST, MVT::i32, Legal);
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal); setOperationAction(ISD::BITCAST, MVT::f32, Legal);
setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal); setOperationAction(ISD::BITCAST, MVT::i64, Legal);
setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal); setOperationAction(ISD::BITCAST, MVT::f64, Legal);
// We cannot sextinreg(i1). Expand to shifts. // We cannot sextinreg(i1). Expand to shifts.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
@ -550,13 +550,13 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
(128 / InVT.getSizeInBits())); (128 / InVT.getSizeInBits()));
// two sanity checks // two sanity checks
assert( LN->getAddressingMode() == ISD::UNINDEXED assert( LN->getAddressingMode() == ISD::UNINDEXED
&& "we should get only UNINDEXED adresses"); && "we should get only UNINDEXED adresses");
// clean aligned loads can be selected as-is // clean aligned loads can be selected as-is
if (InVT.getSizeInBits() == 128 && alignment == 16) if (InVT.getSizeInBits() == 128 && alignment == 16)
return SDValue(); return SDValue();
// Get pointerinfos to the memory chunk(s) that contain the data to load // Get pointerinfos to the memory chunk(s) that contain the data to load
uint64_t mpi_offset = LN->getPointerInfo().Offset; uint64_t mpi_offset = LN->getPointerInfo().Offset;
mpi_offset -= mpi_offset%16; mpi_offset -= mpi_offset%16;
MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset); MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
@ -649,7 +649,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr, SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
lowMemPtr, lowMemPtr,
LN->isVolatile(), LN->isNonTemporal(), 16); LN->isVolatile(), LN->isNonTemporal(), 16);
// When the size is not greater than alignment we get all data with just // When the size is not greater than alignment we get all data with just
// one load // one load
if (alignment >= InVT.getSizeInBits()/8) { if (alignment >= InVT.getSizeInBits()/8) {
@ -662,30 +662,30 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
// Convert the loaded v16i8 vector to the appropriate vector type // Convert the loaded v16i8 vector to the appropriate vector type
// specified by the operand: // specified by the operand:
EVT vecVT = EVT::getVectorVT(*DAG.getContext(), EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
InVT, (128 / InVT.getSizeInBits())); InVT, (128 / InVT.getSizeInBits()));
result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT, result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result)); DAG.getNode(ISD::BITCAST, dl, vecVT, result));
} }
// When alignment is less than the size, we might need (known only at // When alignment is less than the size, we might need (known only at
// run-time) two loads // run-time) two loads
// TODO: if the memory address is composed only from constants, we have // TODO: if the memory address is composed only from constants, we have
// extra kowledge, and might avoid the second load // extra kowledge, and might avoid the second load
else { else {
// storage position offset from lower 16 byte aligned memory chunk // storage position offset from lower 16 byte aligned memory chunk
SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32, SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
basePtr, DAG.getConstant( 0xf, MVT::i32 ) ); basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
// 16 - offset // 16 - offset
SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32, SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( 16, MVT::i32), DAG.getConstant( 16, MVT::i32),
offset ); offset );
// get a registerfull of ones. (this implementation is a workaround: LLVM // get a registerfull of ones. (this implementation is a workaround: LLVM
// cannot handle 128 bit signed int constants) // cannot handle 128 bit signed int constants)
SDValue ones = DAG.getConstant(-1, MVT::v4i32 ); SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
ones = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, ones); ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
SDValue high = DAG.getLoad(MVT::i128, dl, the_chain, SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getNode(ISD::ADD, dl, PtrVT,
basePtr, basePtr,
DAG.getConstant(16, PtrVT)), DAG.getConstant(16, PtrVT)),
highMemPtr, highMemPtr,
@ -695,20 +695,20 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
high.getValue(1)); high.getValue(1));
// Shift the (possible) high part right to compensate the misalignemnt. // Shift the (possible) high part right to compensate the misalignemnt.
// if there is no highpart (i.e. value is i64 and offset is 4), this // if there is no highpart (i.e. value is i64 and offset is 4), this
// will zero out the high value. // will zero out the high value.
high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high, high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
DAG.getNode(ISD::SUB, dl, MVT::i32, DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( 16, MVT::i32), DAG.getConstant( 16, MVT::i32),
offset offset
)); ));
// Shift the low similarily // Shift the low similarily
// TODO: add SPUISD::SHL_BYTES // TODO: add SPUISD::SHL_BYTES
low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset ); low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );
// Merge the two parts // Merge the two parts
result = DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result = DAG.getNode(ISD::BITCAST, dl, vecVT,
DAG.getNode(ISD::OR, dl, MVT::i128, low, high)); DAG.getNode(ISD::OR, dl, MVT::i128, low, high));
if (!InVT.isVector()) { if (!InVT.isVector()) {
@ -759,7 +759,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SDValue result; SDValue result;
EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT, EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
(128 / StVT.getSizeInBits())); (128 / StVT.getSizeInBits()));
// Get pointerinfos to the memory chunk(s) that contain the data to load // Get pointerinfos to the memory chunk(s) that contain the data to load
uint64_t mpi_offset = SN->getPointerInfo().Offset; uint64_t mpi_offset = SN->getPointerInfo().Offset;
mpi_offset -= mpi_offset%16; mpi_offset -= mpi_offset%16;
MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset); MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
@ -767,7 +767,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
// two sanity checks // two sanity checks
assert( SN->getAddressingMode() == ISD::UNINDEXED assert( SN->getAddressingMode() == ISD::UNINDEXED
&& "we should get only UNINDEXED adresses"); && "we should get only UNINDEXED adresses");
// clean aligned loads can be selected as-is // clean aligned loads can be selected as-is
if (StVT.getSizeInBits() == 128 && alignment == 16) if (StVT.getSizeInBits() == 128 && alignment == 16)
@ -876,12 +876,12 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT, SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
insertEltOffs); insertEltOffs);
SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT, SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
theValue); theValue);
result = DAG.getNode(SPUISD::SHUFB, dl, vecVT, result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
vectorizeOp, low, vectorizeOp, low,
DAG.getNode(ISD::BIT_CONVERT, dl, DAG.getNode(ISD::BITCAST, dl,
MVT::v4i32, insertEltOp)); MVT::v4i32, insertEltOp));
result = DAG.getStore(the_chain, dl, result, basePtr, result = DAG.getStore(the_chain, dl, result, basePtr,
@ -892,59 +892,59 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
} }
// do the store when it might cross the 16 byte memory access boundary. // do the store when it might cross the 16 byte memory access boundary.
else { else {
// TODO issue a warning if SN->isVolatile()== true? This is likely not // TODO issue a warning if SN->isVolatile()== true? This is likely not
// what the user wanted. // what the user wanted.
// address offset from nearest lower 16byte alinged address // address offset from nearest lower 16byte alinged address
SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32, SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
SN->getBasePtr(), SN->getBasePtr(),
DAG.getConstant(0xf, MVT::i32)); DAG.getConstant(0xf, MVT::i32));
// 16 - offset // 16 - offset
SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32, SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( 16, MVT::i32), DAG.getConstant( 16, MVT::i32),
offset); offset);
SDValue hi_shift = DAG.getNode(ISD::SUB, dl, MVT::i32, SDValue hi_shift = DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( VT.getSizeInBits()/8, DAG.getConstant( VT.getSizeInBits()/8,
MVT::i32), MVT::i32),
offset_compl); offset_compl);
// 16 - sizeof(Value) // 16 - sizeof(Value)
SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32, SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
DAG.getConstant( 16, MVT::i32), DAG.getConstant( 16, MVT::i32),
DAG.getConstant( VT.getSizeInBits()/8, DAG.getConstant( VT.getSizeInBits()/8,
MVT::i32)); MVT::i32));
// get a registerfull of ones // get a registerfull of ones
SDValue ones = DAG.getConstant(-1, MVT::v4i32); SDValue ones = DAG.getConstant(-1, MVT::v4i32);
ones = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, ones); ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
// Create the 128 bit masks that have ones where the data to store is // Create the 128 bit masks that have ones where the data to store is
// located. // located.
SDValue lowmask, himask; SDValue lowmask, himask;
// if the value to store don't fill up the an entire 128 bits, zero // if the value to store don't fill up the an entire 128 bits, zero
// out the last bits of the mask so that only the value we want to store // out the last bits of the mask so that only the value we want to store
// is masked. // is masked.
// this is e.g. in the case of store i32, align 2 // this is e.g. in the case of store i32, align 2
if (!VT.isVector()){ if (!VT.isVector()){
Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value); Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus); lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask, lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
surplus); surplus);
Value = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, Value); Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask); Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
} }
else { else {
lowmask = ones; lowmask = ones;
Value = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, Value); Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
} }
// this will zero, if there are no data that goes to the high quad // this will zero, if there are no data that goes to the high quad
himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask, himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
offset_compl); offset_compl);
lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask, lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
offset); offset);
// Load in the old data and zero out the parts that will be overwritten with // Load in the old data and zero out the parts that will be overwritten with
// the new data to store. // the new data to store.
SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain, SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
DAG.getNode(ISD::ADD, dl, PtrVT, basePtr, DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
DAG.getConstant( 16, PtrVT)), DAG.getConstant( 16, PtrVT)),
highMemPtr, highMemPtr,
@ -952,40 +952,40 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1), the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
hi.getValue(1)); hi.getValue(1));
low = DAG.getNode(ISD::AND, dl, MVT::i128, low = DAG.getNode(ISD::AND, dl, MVT::i128,
DAG.getNode( ISD::BIT_CONVERT, dl, MVT::i128, low), DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones)); DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
hi = DAG.getNode(ISD::AND, dl, MVT::i128, hi = DAG.getNode(ISD::AND, dl, MVT::i128,
DAG.getNode( ISD::BIT_CONVERT, dl, MVT::i128, hi), DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones)); DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));
// Shift the Value to store into place. rlow contains the parts that go to // Shift the Value to store into place. rlow contains the parts that go to
// the lower memory chunk, rhi has the parts that go to the upper one. // the lower memory chunk, rhi has the parts that go to the upper one.
SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset); SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask); rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value, SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
offset_compl); offset_compl);
// Merge the old data and the new data and store the results // Merge the old data and the new data and store the results
// Need to convert vectors here to integer as 'OR'ing floats assert // Need to convert vectors here to integer as 'OR'ing floats assert
rlow = DAG.getNode(ISD::OR, dl, MVT::i128, rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, low), DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, rlow)); DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
rhi = DAG.getNode(ISD::OR, dl, MVT::i128, rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, hi), DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, rhi)); DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));
low = DAG.getStore(the_chain, dl, rlow, basePtr, low = DAG.getStore(the_chain, dl, rlow, basePtr,
lowMemPtr, lowMemPtr,
SN->isVolatile(), SN->isNonTemporal(), 16); SN->isVolatile(), SN->isNonTemporal(), 16);
hi = DAG.getStore(the_chain, dl, rhi, hi = DAG.getStore(the_chain, dl, rhi,
DAG.getNode(ISD::ADD, dl, PtrVT, basePtr, DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
DAG.getConstant( 16, PtrVT)), DAG.getConstant( 16, PtrVT)),
highMemPtr, highMemPtr,
SN->isVolatile(), SN->isNonTemporal(), 16); SN->isVolatile(), SN->isNonTemporal(), 16);
result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0), result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
hi.getValue(0)); hi.getValue(0));
} }
return result; return result;
} }
@ -1095,7 +1095,7 @@ LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
SDValue T = DAG.getConstant(dbits, MVT::i64); SDValue T = DAG.getConstant(dbits, MVT::i64);
SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T); SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec)); DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
} }
return SDValue(); return SDValue();
@ -1194,8 +1194,8 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,
// vararg handling: // vararg handling:
if (isVarArg) { if (isVarArg) {
// FIXME: we should be able to query the argument registers from // FIXME: we should be able to query the argument registers from
// tablegen generated code. // tablegen generated code.
static const unsigned ArgRegs[] = { static const unsigned ArgRegs[] = {
SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9, SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16, SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
@ -1270,10 +1270,10 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs, CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
*DAG.getContext()); *DAG.getContext());
// FIXME: allow for other calling conventions // FIXME: allow for other calling conventions
CCInfo.AnalyzeCallOperands(Outs, CCC_SPU); CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);
const unsigned NumArgRegs = ArgLocs.size(); const unsigned NumArgRegs = ArgLocs.size();
@ -1438,7 +1438,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// If the call has results, copy the values out of the ret val registers. // If the call has results, copy the values out of the ret val registers.
for (unsigned i = 0; i != RVLocs.size(); ++i) { for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign VA = RVLocs[i]; CCValAssign VA = RVLocs[i];
SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(), SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
InFlag); InFlag);
Chain = Val.getValue(1); Chain = Val.getValue(1);
@ -1671,7 +1671,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
&& "LowerBUILD_VECTOR: Unexpected floating point vector element."); && "LowerBUILD_VECTOR: Unexpected floating point vector element.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants // NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(Value32, MVT::i32); SDValue T = DAG.getConstant(Value32, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T)); DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
break; break;
} }
@ -1681,7 +1681,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
&& "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes."); && "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants // NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(f64val, MVT::i64); SDValue T = DAG.getConstant(f64val, MVT::i64);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T)); DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
break; break;
} }
@ -1691,7 +1691,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
SmallVector<SDValue, 8> Ops; SmallVector<SDValue, 8> Ops;
Ops.assign(8, DAG.getConstant(Value16, MVT::i16)); Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size())); DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
} }
case MVT::v8i16: { case MVT::v8i16: {
@ -1725,7 +1725,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
if (upper == lower) { if (upper == lower) {
// Magic constant that can be matched by IL, ILA, et. al. // Magic constant that can be matched by IL, ILA, et. al.
SDValue Val = DAG.getTargetConstant(upper, MVT::i32); SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT, return DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
Val, Val, Val, Val)); Val, Val, Val, Val));
} else { } else {
@ -1754,7 +1754,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Create lower vector if not a special pattern // Create lower vector if not a special pattern
if (!lower_special) { if (!lower_special) {
SDValue LO32C = DAG.getConstant(lower, MVT::i32); SDValue LO32C = DAG.getConstant(lower, MVT::i32);
LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT, LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
LO32C, LO32C, LO32C, LO32C)); LO32C, LO32C, LO32C, LO32C));
} }
@ -1762,7 +1762,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Create upper vector if not a special pattern // Create upper vector if not a special pattern
if (!upper_special) { if (!upper_special) {
SDValue HI32C = DAG.getConstant(upper, MVT::i32); SDValue HI32C = DAG.getConstant(upper, MVT::i32);
HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT, HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
HI32C, HI32C, HI32C, HI32C)); HI32C, HI32C, HI32C, HI32C));
} }
@ -1846,7 +1846,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
if (EltVT == MVT::i8) { if (EltVT == MVT::i8) {
V2EltIdx0 = 16; V2EltIdx0 = 16;
maskVT = MVT::v16i8; maskVT = MVT::v16i8;
} else if (EltVT == MVT::i16) { } else if (EltVT == MVT::i16) {
V2EltIdx0 = 8; V2EltIdx0 = 8;
maskVT = MVT::v8i16; maskVT = MVT::v8i16;
@ -1862,7 +1862,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
for (unsigned i = 0; i != MaxElts; ++i) { for (unsigned i = 0; i != MaxElts; ++i) {
if (SVN->getMaskElt(i) < 0) if (SVN->getMaskElt(i) < 0)
continue; continue;
unsigned SrcElt = SVN->getMaskElt(i); unsigned SrcElt = SVN->getMaskElt(i);
if (monotonic) { if (monotonic) {
@ -1909,7 +1909,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT, SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
DAG.getRegister(SPU::R1, PtrVT), DAG.getRegister(SPU::R1, PtrVT),
DAG.getConstant(V2EltOffset, MVT::i32)); DAG.getConstant(V2EltOffset, MVT::i32));
SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
maskVT, Pointer); maskVT, Pointer);
// Use shuffle mask in SHUFB synthetic instruction: // Use shuffle mask in SHUFB synthetic instruction:
@ -2173,7 +2173,7 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DAG.getRegister(SPU::R1, PtrVT), DAG.getRegister(SPU::R1, PtrVT),
DAG.getConstant(Offset, PtrVT)); DAG.getConstant(Offset, PtrVT));
// widen the mask when dealing with half vectors // widen the mask when dealing with half vectors
EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(), EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
128/ VT.getVectorElementType().getSizeInBits()); 128/ VT.getVectorElementType().getSizeInBits());
SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer); SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);
@ -2181,7 +2181,7 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(SPUISD::SHUFB, dl, VT, DAG.getNode(SPUISD::SHUFB, dl, VT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp), DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
VecOp, VecOp,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask)); DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));
return result; return result;
} }
@ -2301,12 +2301,12 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
ConstVec = Op.getOperand(0); ConstVec = Op.getOperand(0);
Arg = Op.getOperand(1); Arg = Op.getOperand(1);
if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) { if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) { if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
ConstVec = ConstVec.getOperand(0); ConstVec = ConstVec.getOperand(0);
} else { } else {
ConstVec = Op.getOperand(1); ConstVec = Op.getOperand(1);
Arg = Op.getOperand(0); Arg = Op.getOperand(0);
if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) { if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
ConstVec = ConstVec.getOperand(0); ConstVec = ConstVec.getOperand(0);
} }
} }
@ -2347,7 +2347,7 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
*/ */
static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) { static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType(); EVT VT = Op.getValueType();
EVT vecVT = EVT::getVectorVT(*DAG.getContext(), EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
VT, (128 / VT.getSizeInBits())); VT, (128 / VT.getSizeInBits()));
DebugLoc dl = Op.getDebugLoc(); DebugLoc dl = Op.getDebugLoc();
@ -2523,7 +2523,7 @@ static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
// Take advantage of the fact that (truncate (sra arg, 32)) is efficiently // Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
// selected to a NOP: // selected to a NOP:
SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs); SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
SDValue lhsHi32 = SDValue lhsHi32 =
DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
DAG.getNode(ISD::SRL, dl, IntVT, DAG.getNode(ISD::SRL, dl, IntVT,
@ -2557,7 +2557,7 @@ static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
ISD::SETGT)); ISD::SETGT));
} }
SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs); SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
SDValue rhsHi32 = SDValue rhsHi32 =
DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
DAG.getNode(ISD::SRL, dl, IntVT, DAG.getNode(ISD::SRL, dl, IntVT,
@ -2671,7 +2671,7 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
// Type to truncate to // Type to truncate to
EVT VT = Op.getValueType(); EVT VT = Op.getValueType();
MVT simpleVT = VT.getSimpleVT(); MVT simpleVT = VT.getSimpleVT();
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
VT, (128 / VT.getSizeInBits())); VT, (128 / VT.getSizeInBits()));
DebugLoc dl = Op.getDebugLoc(); DebugLoc dl = Op.getDebugLoc();
@ -2745,16 +2745,16 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
DAG.getConstant(31, MVT::i32)); DAG.getConstant(31, MVT::i32));
// reinterpret as a i128 (SHUFB requires it). This gets lowered away. // reinterpret as a i128 (SHUFB requires it). This gets lowered away.
SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
dl, Op0VT, Op0, dl, Op0VT, Op0,
DAG.getTargetConstant( DAG.getTargetConstant(
SPU::GPRCRegClass.getID(), SPU::GPRCRegClass.getID(),
MVT::i32)), 0); MVT::i32)), 0);
// Shuffle bytes - Copy the sign bits into the upper 64 bits // Shuffle bytes - Copy the sign bits into the upper 64 bits
// and the input value into the lower 64 bits. // and the input value into the lower 64 bits.
SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt, SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
extended, sraVal, shufMask); extended, sraVal, shufMask);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, extShuffle); return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
} }
//! Custom (target-specific) lowering entry point //! Custom (target-specific) lowering entry point
@ -3234,14 +3234,14 @@ bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
return isInt<10>(Imm); return isInt<10>(Imm);
} }
bool bool
SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM, SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type * ) const{ const Type * ) const{
// A-form: 18bit absolute address. // A-form: 18bit absolute address.
if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0) if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
return true; return true;
// D-form: reg + 14bit offset // D-form: reg + 14bit offset
if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs)) if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
return true; return true;

View File

@ -116,8 +116,8 @@ MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM)
} }
// Expand unsupported conversions // Expand unsupported conversions
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand); setOperationAction(ISD::BITCAST, MVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand); setOperationAction(ISD::BITCAST, MVT::i32, Expand);
// Expand SELECT_CC // Expand SELECT_CC
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
@ -926,8 +926,8 @@ MBlazeTargetLowering::getSingleConstraintMatchWeight(
default: default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
break; break;
case 'd': case 'd':
case 'y': case 'y':
if (type->isIntegerTy()) if (type->isIntegerTy())
weight = CW_Register; weight = CW_Register;
break; break;

View File

@ -57,7 +57,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
Subtarget = &TM.getSubtarget<MipsSubtarget>(); Subtarget = &TM.getSubtarget<MipsSubtarget>();
// Mips does not have i1 type, so use i32 for // Mips does not have i1 type, so use i32 for
// setcc operations results (slt, sgt, ...). // setcc operations results (slt, sgt, ...).
setBooleanContents(ZeroOrOneBooleanContent); setBooleanContents(ZeroOrOneBooleanContent);
// Set up the register classes // Set up the register classes
@ -69,7 +69,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
if (!Subtarget->isFP64bit()) if (!Subtarget->isFP64bit())
addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass); addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);
// Load extented operations for i1 types must be promoted // Load extented operations for i1 types must be promoted
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote); setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote); setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote); setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@ -78,9 +78,9 @@ MipsTargetLowering(MipsTargetMachine &TM)
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand); setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand); setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// Used by legalize types to correctly generate the setcc result. // Used by legalize types to correctly generate the setcc result.
// Without this, every float setcc comes with a AND/OR with the result, // Without this, every float setcc comes with a AND/OR with the result,
// we don't want this, since the fpcmp result goes to a flag register, // we don't want this, since the fpcmp result goes to a flag register,
// which is used implicitly by brcond and select operations. // which is used implicitly by brcond and select operations.
AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
@ -100,8 +100,8 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::VASTART, MVT::Other, Custom); setOperationAction(ISD::VASTART, MVT::Other, Custom);
// We custom lower AND/OR to handle the case where the DAG contain 'ands/ors' // We custom lower AND/OR to handle the case where the DAG contain 'ands/ors'
// with operands comming from setcc fp comparions. This is necessary since // with operands comming from setcc fp comparions. This is necessary since
// the result from these setcc are in a flag registers (FCR31). // the result from these setcc are in a flag registers (FCR31).
setOperationAction(ISD::AND, MVT::i32, Custom); setOperationAction(ISD::AND, MVT::i32, Custom);
setOperationAction(ISD::OR, MVT::i32, Custom); setOperationAction(ISD::OR, MVT::i32, Custom);
@ -168,7 +168,7 @@ unsigned MipsTargetLowering::getFunctionAlignment(const Function *) const {
SDValue MipsTargetLowering:: SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const LowerOperation(SDValue Op, SelectionDAG &DAG) const
{ {
switch (Op.getOpcode()) switch (Op.getOpcode())
{ {
case ISD::AND: return LowerANDOR(Op, DAG); case ISD::AND: return LowerANDOR(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG); case ISD::BRCOND: return LowerBRCOND(Op, DAG);
@ -194,7 +194,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
// MachineFunction as a live in value. It also creates a corresponding // MachineFunction as a live in value. It also creates a corresponding
// virtual register for it. // virtual register for it.
static unsigned static unsigned
AddLiveIn(MachineFunction &MF, unsigned PReg, TargetRegisterClass *RC) AddLiveIn(MachineFunction &MF, unsigned PReg, TargetRegisterClass *RC)
{ {
assert(RC->contains(PReg) && "Not the correct regclass!"); assert(RC->contains(PReg) && "Not the correct regclass!");
unsigned VReg = MF.getRegInfo().createVirtualRegister(RC); unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
@ -212,7 +212,7 @@ static Mips::FPBranchCode GetFPBranchCodeFromCond(Mips::CondCode CC) {
return Mips::BRANCH_INVALID; return Mips::BRANCH_INVALID;
} }
static unsigned FPBranchCodeToOpc(Mips::FPBranchCode BC) { static unsigned FPBranchCodeToOpc(Mips::FPBranchCode BC) {
switch(BC) { switch(BC) {
default: default:
@ -227,24 +227,24 @@ static unsigned FPBranchCodeToOpc(Mips::FPBranchCode BC) {
static Mips::CondCode FPCondCCodeToFCC(ISD::CondCode CC) { static Mips::CondCode FPCondCCodeToFCC(ISD::CondCode CC) {
switch (CC) { switch (CC) {
default: llvm_unreachable("Unknown fp condition code!"); default: llvm_unreachable("Unknown fp condition code!");
case ISD::SETEQ: case ISD::SETEQ:
case ISD::SETOEQ: return Mips::FCOND_EQ; case ISD::SETOEQ: return Mips::FCOND_EQ;
case ISD::SETUNE: return Mips::FCOND_OGL; case ISD::SETUNE: return Mips::FCOND_OGL;
case ISD::SETLT: case ISD::SETLT:
case ISD::SETOLT: return Mips::FCOND_OLT; case ISD::SETOLT: return Mips::FCOND_OLT;
case ISD::SETGT: case ISD::SETGT:
case ISD::SETOGT: return Mips::FCOND_OGT; case ISD::SETOGT: return Mips::FCOND_OGT;
case ISD::SETLE: case ISD::SETLE:
case ISD::SETOLE: return Mips::FCOND_OLE; case ISD::SETOLE: return Mips::FCOND_OLE;
case ISD::SETGE: case ISD::SETGE:
case ISD::SETOGE: return Mips::FCOND_OGE; case ISD::SETOGE: return Mips::FCOND_OGE;
case ISD::SETULT: return Mips::FCOND_ULT; case ISD::SETULT: return Mips::FCOND_ULT;
case ISD::SETULE: return Mips::FCOND_ULE; case ISD::SETULE: return Mips::FCOND_ULE;
case ISD::SETUGT: return Mips::FCOND_UGT; case ISD::SETUGT: return Mips::FCOND_UGT;
case ISD::SETUGE: return Mips::FCOND_UGE; case ISD::SETUGE: return Mips::FCOND_UGE;
case ISD::SETUO: return Mips::FCOND_UN; case ISD::SETUO: return Mips::FCOND_UN;
case ISD::SETO: return Mips::FCOND_OR; case ISD::SETO: return Mips::FCOND_OR;
case ISD::SETNE: case ISD::SETNE:
case ISD::SETONE: return Mips::FCOND_NEQ; case ISD::SETONE: return Mips::FCOND_NEQ;
case ISD::SETUEQ: return Mips::FCOND_UEQ; case ISD::SETUEQ: return Mips::FCOND_UEQ;
} }
@ -364,7 +364,7 @@ LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const
// Emit the round instruction and bit convert to integer // Emit the round instruction and bit convert to integer
SDValue Trunc = DAG.getNode(MipsISD::FPRound, dl, MVT::f32, SDValue Trunc = DAG.getNode(MipsISD::FPRound, dl, MVT::f32,
Src, CondReg.getValue(1)); Src, CondReg.getValue(1));
SDValue BitCvt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Trunc); SDValue BitCvt = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Trunc);
return BitCvt; return BitCvt;
} }
@ -382,11 +382,11 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
// obtain the new stack size. // obtain the new stack size.
SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size); SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
// The Sub result contains the new stack start address, so it // The Sub result contains the new stack start address, so it
// must be placed in the stack pointer register. // must be placed in the stack pointer register.
Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, Mips::SP, Sub); Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, Mips::SP, Sub);
// This node always has two return values: a new stack pointer // This node always has two return values: a new stack pointer
// value and a chain // value and a chain
SDValue Ops[2] = { Sub, Chain }; SDValue Ops[2] = { Sub, Chain };
return DAG.getMergeValues(Ops, 2, dl); return DAG.getMergeValues(Ops, 2, dl);
@ -405,9 +405,9 @@ LowerANDOR(SDValue Op, SelectionDAG &DAG) const
SDValue True = DAG.getConstant(1, MVT::i32); SDValue True = DAG.getConstant(1, MVT::i32);
SDValue False = DAG.getConstant(0, MVT::i32); SDValue False = DAG.getConstant(0, MVT::i32);
SDValue LSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(), SDValue LSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
LHS, True, False, LHS.getOperand(2)); LHS, True, False, LHS.getOperand(2));
SDValue RSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(), SDValue RSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
RHS, True, False, RHS.getOperand(2)); RHS, True, False, RHS.getOperand(2));
return DAG.getNode(Op.getOpcode(), dl, MVT::i32, LSEL, RSEL); return DAG.getNode(Op.getOpcode(), dl, MVT::i32, LSEL, RSEL);
@ -416,7 +416,7 @@ LowerANDOR(SDValue Op, SelectionDAG &DAG) const
SDValue MipsTargetLowering:: SDValue MipsTargetLowering::
LowerBRCOND(SDValue Op, SelectionDAG &DAG) const LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
{ {
// The first operand is the chain, the second is the condition, the third is // The first operand is the chain, the second is the condition, the third is
// the block to branch to if the condition is true. // the block to branch to if the condition is true.
SDValue Chain = Op.getOperand(0); SDValue Chain = Op.getOperand(0);
SDValue Dest = Op.getOperand(2); SDValue Dest = Op.getOperand(2);
@ -424,55 +424,55 @@ LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
if (Op.getOperand(1).getOpcode() != MipsISD::FPCmp) if (Op.getOperand(1).getOpcode() != MipsISD::FPCmp)
return Op; return Op;
SDValue CondRes = Op.getOperand(1); SDValue CondRes = Op.getOperand(1);
SDValue CCNode = CondRes.getOperand(2); SDValue CCNode = CondRes.getOperand(2);
Mips::CondCode CC = Mips::CondCode CC =
(Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue(); (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), MVT::i32); SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), MVT::i32);
return DAG.getNode(MipsISD::FPBrcond, dl, Op.getValueType(), Chain, BrCode, return DAG.getNode(MipsISD::FPBrcond, dl, Op.getValueType(), Chain, BrCode,
Dest, CondRes); Dest, CondRes);
} }
SDValue MipsTargetLowering:: SDValue MipsTargetLowering::
LowerSETCC(SDValue Op, SelectionDAG &DAG) const LowerSETCC(SDValue Op, SelectionDAG &DAG) const
{ {
// The operands to this are the left and right operands to compare (ops #0, // The operands to this are the left and right operands to compare (ops #0,
// and #1) and the condition code to compare them with (op #2) as a // and #1) and the condition code to compare them with (op #2) as a
// CondCodeSDNode. // CondCodeSDNode.
SDValue LHS = Op.getOperand(0); SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1); SDValue RHS = Op.getOperand(1);
DebugLoc dl = Op.getDebugLoc(); DebugLoc dl = Op.getDebugLoc();
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
return DAG.getNode(MipsISD::FPCmp, dl, Op.getValueType(), LHS, RHS, return DAG.getNode(MipsISD::FPCmp, dl, Op.getValueType(), LHS, RHS,
DAG.getConstant(FPCondCCodeToFCC(CC), MVT::i32)); DAG.getConstant(FPCondCCodeToFCC(CC), MVT::i32));
} }
SDValue MipsTargetLowering:: SDValue MipsTargetLowering::
LowerSELECT(SDValue Op, SelectionDAG &DAG) const LowerSELECT(SDValue Op, SelectionDAG &DAG) const
{ {
SDValue Cond = Op.getOperand(0); SDValue Cond = Op.getOperand(0);
SDValue True = Op.getOperand(1); SDValue True = Op.getOperand(1);
SDValue False = Op.getOperand(2); SDValue False = Op.getOperand(2);
DebugLoc dl = Op.getDebugLoc(); DebugLoc dl = Op.getDebugLoc();
// if the incomming condition comes from a integer compare, the select // if the incomming condition comes from a integer compare, the select
// operation must be SelectCC or a conditional move if the subtarget // operation must be SelectCC or a conditional move if the subtarget
// supports it. // supports it.
if (Cond.getOpcode() != MipsISD::FPCmp) { if (Cond.getOpcode() != MipsISD::FPCmp) {
if (Subtarget->hasCondMov() && !True.getValueType().isFloatingPoint()) if (Subtarget->hasCondMov() && !True.getValueType().isFloatingPoint())
return Op; return Op;
return DAG.getNode(MipsISD::SelectCC, dl, True.getValueType(), return DAG.getNode(MipsISD::SelectCC, dl, True.getValueType(),
Cond, True, False); Cond, True, False);
} }
// if the incomming condition comes from fpcmp, the select // if the incomming condition comes from fpcmp, the select
// operation must use FPSelectCC. // operation must use FPSelectCC.
SDValue CCNode = Cond.getOperand(2); SDValue CCNode = Cond.getOperand(2);
return DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(), return DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
Cond, True, False, CCNode); Cond, True, False, CCNode);
} }
@ -484,16 +484,16 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
SDVTList VTs = DAG.getVTList(MVT::i32); SDVTList VTs = DAG.getVTList(MVT::i32);
MipsTargetObjectFile &TLOF = (MipsTargetObjectFile&)getObjFileLowering(); MipsTargetObjectFile &TLOF = (MipsTargetObjectFile&)getObjFileLowering();
// %gp_rel relocation // %gp_rel relocation
if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) { if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0, SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_GPREL); MipsII::MO_GPREL);
SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, dl, VTs, &GA, 1); SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, dl, VTs, &GA, 1);
SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32); SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
return DAG.getNode(ISD::ADD, dl, MVT::i32, GOT, GPRelNode); return DAG.getNode(ISD::ADD, dl, MVT::i32, GOT, GPRelNode);
} }
// %hi/%lo relocation // %hi/%lo relocation
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0, SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
@ -505,7 +505,7 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
} else { } else {
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0, SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_GOT); MipsII::MO_GOT);
SDValue ResNode = DAG.getLoad(MVT::i32, dl, SDValue ResNode = DAG.getLoad(MVT::i32, dl,
DAG.getEntryNode(), GA, MachinePointerInfo(), DAG.getEntryNode(), GA, MachinePointerInfo(),
false, false, 0); false, false, 0);
// On functions and global targets not internal linked only // On functions and global targets not internal linked only
@ -531,7 +531,7 @@ SDValue MipsTargetLowering::
LowerJumpTable(SDValue Op, SelectionDAG &DAG) const LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
{ {
SDValue ResNode; SDValue ResNode;
SDValue HiPart; SDValue HiPart;
// FIXME there isn't actually debug info here // FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc(); DebugLoc dl = Op.getDebugLoc();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_; bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
@ -566,25 +566,25 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
DebugLoc dl = Op.getDebugLoc(); DebugLoc dl = Op.getDebugLoc();
// gp_rel relocation // gp_rel relocation
// FIXME: we should reference the constant pool using small data sections, // FIXME: we should reference the constant pool using small data sections,
// but the asm printer currently doens't support this feature without // but the asm printer currently doens't support this feature without
// hacking it. This feature should come soon so we can uncomment the // hacking it. This feature should come soon so we can uncomment the
// stuff below. // stuff below.
//if (IsInSmallSection(C->getType())) { //if (IsInSmallSection(C->getType())) {
// SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP); // SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
// SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32); // SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
// ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode); // ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(), SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
N->getOffset(), MipsII::MO_ABS_HILO); N->getOffset(), MipsII::MO_ABS_HILO);
SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, CP); SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, CP);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP); SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP);
ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo); ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
} else { } else {
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(), SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
N->getOffset(), MipsII::MO_GOT); N->getOffset(), MipsII::MO_GOT);
SDValue Load = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(), SDValue Load = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(),
CP, MachinePointerInfo::getConstantPool(), CP, MachinePointerInfo::getConstantPool(),
false, false, 0); false, false, 0);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP); SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP);
@ -617,14 +617,14 @@ SDValue MipsTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
#include "MipsGenCallingConv.inc" #include "MipsGenCallingConv.inc"
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// TODO: Implement a generic logic using tblgen that can support this. // TODO: Implement a generic logic using tblgen that can support this.
// Mips O32 ABI rules: // Mips O32 ABI rules:
// --- // ---
// i32 - Passed in A0, A1, A2, A3 and stack // i32 - Passed in A0, A1, A2, A3 and stack
// f32 - Only passed in f32 registers if no int reg has been used yet to hold // f32 - Only passed in f32 registers if no int reg has been used yet to hold
// an argument. Otherwise, passed in A1, A2, A3 and stack. // an argument. Otherwise, passed in A1, A2, A3 and stack.
// f64 - Only passed in two aliased f32 registers if no int reg has been used // f64 - Only passed in two aliased f32 registers if no int reg has been used
// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is // yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
// not used, it must be shadowed. If only A3 is avaiable, shadow it and // not used, it must be shadowed. If only A3 is avaiable, shadow it and
// go to stack. // go to stack.
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -633,7 +633,7 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo, MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) { ISD::ArgFlagsTy ArgFlags, CCState &State) {
static const unsigned IntRegsSize=4, FloatRegsSize=2; static const unsigned IntRegsSize=4, FloatRegsSize=2;
static const unsigned IntRegs[] = { static const unsigned IntRegs[] = {
Mips::A0, Mips::A1, Mips::A2, Mips::A3 Mips::A0, Mips::A1, Mips::A2, Mips::A3
@ -681,7 +681,7 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
Reg = Mips::A2; Reg = Mips::A2;
for (;UnallocIntReg < IntRegsSize; ++UnallocIntReg) for (;UnallocIntReg < IntRegsSize; ++UnallocIntReg)
State.AllocateReg(UnallocIntReg); State.AllocateReg(UnallocIntReg);
} }
LocVT = MVT::i32; LocVT = MVT::i32;
} }
@ -739,7 +739,7 @@ static bool CC_MipsO32_VarArgs(unsigned ValNo, MVT ValVT,
IntRegs[UnallocIntReg] == (unsigned (Mips::A2))) { IntRegs[UnallocIntReg] == (unsigned (Mips::A2))) {
unsigned Reg = State.AllocateReg(IntRegs, IntRegsSize); unsigned Reg = State.AllocateReg(IntRegs, IntRegsSize);
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, MVT::i32, LocInfo)); State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, MVT::i32, LocInfo));
// Shadow the next register so it can be used // Shadow the next register so it can be used
// later to get the other 32bit part. // later to get the other 32bit part.
State.AllocateReg(IntRegs, IntRegsSize); State.AllocateReg(IntRegs, IntRegsSize);
return false; return false;
@ -791,11 +791,11 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (Subtarget->isABI_O32()) { if (Subtarget->isABI_O32()) {
int VTsize = MVT(MVT::i32).getSizeInBits()/8; int VTsize = MVT(MVT::i32).getSizeInBits()/8;
MFI->CreateFixedObject(VTsize, (VTsize*3), true); MFI->CreateFixedObject(VTsize, (VTsize*3), true);
CCInfo.AnalyzeCallOperands(Outs, CCInfo.AnalyzeCallOperands(Outs,
isVarArg ? CC_MipsO32_VarArgs : CC_MipsO32); isVarArg ? CC_MipsO32_VarArgs : CC_MipsO32);
} else } else
CCInfo.AnalyzeCallOperands(Outs, CC_Mips); CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
// Get a count of how many bytes are to be pushed on the stack. // Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset(); unsigned NumBytes = CCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
@ -804,7 +804,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass; SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
SmallVector<SDValue, 8> MemOpChains; SmallVector<SDValue, 8> MemOpChains;
// First/LastArgStackLoc contains the first/last // First/LastArgStackLoc contains the first/last
// "at stack" argument location. // "at stack" argument location.
int LastArgStackLoc = 0; int LastArgStackLoc = 0;
unsigned FirstStackArgLoc = (Subtarget->isABI_EABI() ? 0 : 16); unsigned FirstStackArgLoc = (Subtarget->isABI_EABI() ? 0 : 16);
@ -817,12 +817,12 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Promote the value if needed. // Promote the value if needed.
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!"); default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full: case CCValAssign::Full:
if (Subtarget->isABI_O32() && VA.isRegLoc()) { if (Subtarget->isABI_O32() && VA.isRegLoc()) {
if (VA.getValVT() == MVT::f32 && VA.getLocVT() == MVT::i32) if (VA.getValVT() == MVT::f32 && VA.getLocVT() == MVT::i32)
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Arg); Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
if (VA.getValVT() == MVT::f64 && VA.getLocVT() == MVT::i32) { if (VA.getValVT() == MVT::f64 && VA.getLocVT() == MVT::i32) {
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg); Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg, SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
DAG.getConstant(0, getPointerTy())); DAG.getConstant(0, getPointerTy()));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg, SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
@ -830,7 +830,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo)); RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
RegsToPass.push_back(std::make_pair(VA.getLocReg()+1, Hi)); RegsToPass.push_back(std::make_pair(VA.getLocReg()+1, Hi));
continue; continue;
} }
} }
break; break;
case CCValAssign::SExt: case CCValAssign::SExt:
@ -843,17 +843,17 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg); Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
break; break;
} }
// Arguments that can be passed on register must be kept at // Arguments that can be passed on register must be kept at
// RegsToPass vector // RegsToPass vector
if (VA.isRegLoc()) { if (VA.isRegLoc()) {
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
continue; continue;
} }
// Register can't get to this point... // Register can't get to this point...
assert(VA.isMemLoc()); assert(VA.isMemLoc());
// Create the frame index object for this incoming parameter // Create the frame index object for this incoming parameter
// This guarantees that when allocating Local Area the firsts // This guarantees that when allocating Local Area the firsts
// 16 bytes which are alwayes reserved won't be overwritten // 16 bytes which are alwayes reserved won't be overwritten
@ -864,7 +864,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy()); SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy());
// emit ISD::STORE whichs stores the // emit ISD::STORE whichs stores the
// parameter value to a stack Location // parameter value to a stack Location
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff, MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
MachinePointerInfo(), MachinePointerInfo(),
@ -873,34 +873,34 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because all store // Transform all store nodes into one single node because all store
// nodes are independent of each other. // nodes are independent of each other.
if (!MemOpChains.empty()) if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size()); &MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token // Build a sequence of copy-to-reg nodes chained together with token
// chain and flag operands which copy the outgoing args into registers. // chain and flag operands which copy the outgoing args into registers.
// The InFlag in necessary since all emited instructions must be // The InFlag in necessary since all emited instructions must be
// stuck together. // stuck together.
SDValue InFlag; SDValue InFlag;
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
RegsToPass[i].second, InFlag); RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1); InFlag = Chain.getValue(1);
} }
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it. // node so that legalize doesn't hack it.
unsigned char OpFlag = IsPIC ? MipsII::MO_GOT_CALL : MipsII::MO_NO_FLAG; unsigned char OpFlag = IsPIC ? MipsII::MO_GOT_CALL : MipsII::MO_NO_FLAG;
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
getPointerTy(), 0, OpFlag); getPointerTy(), 0, OpFlag);
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
getPointerTy(), OpFlag); getPointerTy(), OpFlag);
// MipsJmpLink = #chain, #target_address, #opt_in_flags... // MipsJmpLink = #chain, #target_address, #opt_in_flags...
// = Chain, Callee, Reg#1, Reg#2, ... // = Chain, Callee, Reg#1, Reg#2, ...
// //
// Returns a chain & a flag for retval copy to use. // Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
@ -908,7 +908,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Ops.push_back(Chain); Ops.push_back(Chain);
Ops.push_back(Callee); Ops.push_back(Callee);
// Add argument registers to the end of the list so that they are // Add argument registers to the end of the list so that they are
// known live into the call. // known live into the call.
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
Ops.push_back(DAG.getRegister(RegsToPass[i].first, Ops.push_back(DAG.getRegister(RegsToPass[i].first,
@ -920,17 +920,17 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
Chain = DAG.getNode(MipsISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size()); Chain = DAG.getNode(MipsISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size());
InFlag = Chain.getValue(1); InFlag = Chain.getValue(1);
// Create a stack location to hold GP when PIC is used. This stack // Create a stack location to hold GP when PIC is used. This stack
// location is used on function prologue to save GP and also after all // location is used on function prologue to save GP and also after all
// emited CALL's to restore GP. // emited CALL's to restore GP.
if (IsPIC) { if (IsPIC) {
// Function can have an arbitrary number of calls, so // Function can have an arbitrary number of calls, so
// hold the LastArgStackLoc with the biggest offset. // hold the LastArgStackLoc with the biggest offset.
int FI; int FI;
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
if (LastArgStackLoc >= MipsFI->getGPStackOffset()) { if (LastArgStackLoc >= MipsFI->getGPStackOffset()) {
LastArgStackLoc = (!LastArgStackLoc) ? (16) : (LastArgStackLoc+4); LastArgStackLoc = (!LastArgStackLoc) ? (16) : (LastArgStackLoc+4);
// Create the frame index only once. SPOffset here can be anything // Create the frame index only once. SPOffset here can be anything
// (this will be fixed on processFunctionBeforeFrameFinalized) // (this will be fixed on processFunctionBeforeFrameFinalized)
if (MipsFI->getGPStackOffset() == -1) { if (MipsFI->getGPStackOffset() == -1) {
FI = MFI->CreateFixedObject(4, 0, true); FI = MFI->CreateFixedObject(4, 0, true);
@ -946,10 +946,10 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
MachinePointerInfo::getFixedStack(FI), MachinePointerInfo::getFixedStack(FI),
false, false, 0); false, false, 0);
Chain = GPLoad.getValue(1); Chain = GPLoad.getValue(1);
Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, MVT::i32), Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, MVT::i32),
GPLoad, SDValue(0,0)); GPLoad, SDValue(0,0));
InFlag = Chain.getValue(1); InFlag = Chain.getValue(1);
} }
// Create the CALLSEQ_END node. // Create the CALLSEQ_END node.
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true), Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
@ -993,7 +993,7 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Formal Arguments Calling Convention Implementation // Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
/// LowerFormalArguments - transform physical registers into virtual registers /// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments places on the stack. /// and generate load operations for arguments places on the stack.
SDValue SDValue
MipsTargetLowering::LowerFormalArguments(SDValue Chain, MipsTargetLowering::LowerFormalArguments(SDValue Chain,
@ -1023,7 +1023,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
ArgLocs, *DAG.getContext()); ArgLocs, *DAG.getContext());
if (Subtarget->isABI_O32()) if (Subtarget->isABI_O32())
CCInfo.AnalyzeFormalArguments(Ins, CCInfo.AnalyzeFormalArguments(Ins,
isVarArg ? CC_MipsO32_VarArgs : CC_MipsO32); isVarArg ? CC_MipsO32_VarArgs : CC_MipsO32);
else else
CCInfo.AnalyzeFormalArguments(Ins, CC_Mips); CCInfo.AnalyzeFormalArguments(Ins, CC_Mips);
@ -1042,22 +1042,22 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
TargetRegisterClass *RC = 0; TargetRegisterClass *RC = 0;
if (RegVT == MVT::i32) if (RegVT == MVT::i32)
RC = Mips::CPURegsRegisterClass; RC = Mips::CPURegsRegisterClass;
else if (RegVT == MVT::f32) else if (RegVT == MVT::f32)
RC = Mips::FGR32RegisterClass; RC = Mips::FGR32RegisterClass;
else if (RegVT == MVT::f64) { else if (RegVT == MVT::f64) {
if (!Subtarget->isSingleFloat()) if (!Subtarget->isSingleFloat())
RC = Mips::AFGR64RegisterClass; RC = Mips::AFGR64RegisterClass;
} else } else
llvm_unreachable("RegVT not supported by FormalArguments Lowering"); llvm_unreachable("RegVT not supported by FormalArguments Lowering");
// Transform the arguments stored on // Transform the arguments stored on
// physical registers into virtual ones // physical registers into virtual ones
unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegEnd, RC); unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegEnd, RC);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
// If this is an 8 or 16-bit value, it has been passed promoted // If this is an 8 or 16-bit value, it has been passed promoted
// to 32 bits. Insert an assert[sz]ext to capture this, then // to 32 bits. Insert an assert[sz]ext to capture this, then
// truncate to the right size. // truncate to the right size.
if (VA.getLocInfo() != CCValAssign::Full) { if (VA.getLocInfo() != CCValAssign::Full) {
unsigned Opcode = 0; unsigned Opcode = 0;
@ -1066,21 +1066,21 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
else if (VA.getLocInfo() == CCValAssign::ZExt) else if (VA.getLocInfo() == CCValAssign::ZExt)
Opcode = ISD::AssertZext; Opcode = ISD::AssertZext;
if (Opcode) if (Opcode)
ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue, ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT())); DAG.getValueType(VA.getValVT()));
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue); ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
} }
// Handle O32 ABI cases: i32->f32 and (i32,i32)->f64 // Handle O32 ABI cases: i32->f32 and (i32,i32)->f64
if (Subtarget->isABI_O32()) { if (Subtarget->isABI_O32()) {
if (RegVT == MVT::i32 && VA.getValVT() == MVT::f32) if (RegVT == MVT::i32 && VA.getValVT() == MVT::f32)
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue); ArgValue = DAG.getNode(ISD::BITCAST, dl, MVT::f32, ArgValue);
if (RegVT == MVT::i32 && VA.getValVT() == MVT::f64) { if (RegVT == MVT::i32 && VA.getValVT() == MVT::f64) {
unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(), unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(),
VA.getLocReg()+1, RC); VA.getLocReg()+1, RC);
SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT); SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT);
SDValue Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue); SDValue Hi = DAG.getNode(ISD::BITCAST, dl, MVT::f32, ArgValue);
SDValue Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue2); SDValue Lo = DAG.getNode(ISD::BITCAST, dl, MVT::f32, ArgValue2);
ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::f64, Lo, Hi); ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::f64, Lo, Hi);
} }
} }
@ -1093,13 +1093,13 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// The last argument is not a register anymore // The last argument is not a register anymore
ArgRegEnd = 0; ArgRegEnd = 0;
// The stack pointer offset is relative to the caller stack frame. // The stack pointer offset is relative to the caller stack frame.
// Since the real stack size is unknown here, a negative SPOffset // Since the real stack size is unknown here, a negative SPOffset
// is used so there's a way to adjust these offsets when the stack // is used so there's a way to adjust these offsets when the stack
// size get known (on EliminateFrameIndex). A dummy SPOffset is // size get known (on EliminateFrameIndex). A dummy SPOffset is
// used instead of a direct negative address (which is recorded to // used instead of a direct negative address (which is recorded to
// be used on emitPrologue) to avoid mis-calc of the first stack // be used on emitPrologue) to avoid mis-calc of the first stack
// offset on PEI::calculateFrameObjectOffsets. // offset on PEI::calculateFrameObjectOffsets.
// Arguments are always 32-bit. // Arguments are always 32-bit.
unsigned ArgSize = VA.getLocVT().getSizeInBits()/8; unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
@ -1130,11 +1130,11 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// To meet ABI, when VARARGS are passed on registers, the registers // To meet ABI, when VARARGS are passed on registers, the registers
// must have their values written to the caller stack frame. If the last // must have their values written to the caller stack frame. If the last
// argument was placed in the stack, there's no need to save any register. // argument was placed in the stack, there's no need to save any register.
if ((isVarArg) && (Subtarget->isABI_O32() && ArgRegEnd)) { if ((isVarArg) && (Subtarget->isABI_O32() && ArgRegEnd)) {
if (StackPtr.getNode() == 0) if (StackPtr.getNode() == 0)
StackPtr = DAG.getRegister(StackReg, getPointerTy()); StackPtr = DAG.getRegister(StackReg, getPointerTy());
// The last register argument that must be saved is Mips::A3 // The last register argument that must be saved is Mips::A3
TargetRegisterClass *RC = Mips::CPURegsRegisterClass; TargetRegisterClass *RC = Mips::CPURegsRegisterClass;
unsigned StackLoc = ArgLocs.size()-1; unsigned StackLoc = ArgLocs.size()-1;
@ -1157,7 +1157,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
} }
} }
// All stores are grouped in one node to allow the matching between // All stores are grouped in one node to allow the matching between
// the size of Ins and InVals. This only happens when on varg functions // the size of Ins and InVals. This only happens when on varg functions
if (!OutChains.empty()) { if (!OutChains.empty()) {
OutChains.push_back(Chain); OutChains.push_back(Chain);
@ -1190,7 +1190,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// Analize return values. // Analize return values.
CCInfo.AnalyzeReturn(Outs, RetCC_Mips); CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
// If this is the first return lowered for this function, add // If this is the first return lowered for this function, add
// the regs to the liveout set for the function. // the regs to the liveout set for the function.
if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
for (unsigned i = 0; i != RVLocs.size(); ++i) for (unsigned i = 0; i != RVLocs.size(); ++i)
@ -1205,7 +1205,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
OutVals[i], Flag); OutVals[i], Flag);
// guarantee that all emitted copies are // guarantee that all emitted copies are
@ -1222,7 +1222,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
unsigned Reg = MipsFI->getSRetReturnReg(); unsigned Reg = MipsFI->getSRetReturnReg();
if (!Reg) if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block"); llvm_unreachable("sret virtual register not created in the entry block");
SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
@ -1232,10 +1232,10 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// Return on Mips is always a "jr $ra" // Return on Mips is always a "jr $ra"
if (Flag.getNode()) if (Flag.getNode())
return DAG.getNode(MipsISD::Ret, dl, MVT::Other, return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
Chain, DAG.getRegister(Mips::RA, MVT::i32), Flag); Chain, DAG.getRegister(Mips::RA, MVT::i32), Flag);
else // Return Void else // Return Void
return DAG.getNode(MipsISD::Ret, dl, MVT::Other, return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
Chain, DAG.getRegister(Mips::RA, MVT::i32)); Chain, DAG.getRegister(Mips::RA, MVT::i32));
} }
@ -1246,21 +1246,21 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
/// getConstraintType - Given a constraint letter, return the type of /// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target. /// constraint it is for this target.
MipsTargetLowering::ConstraintType MipsTargetLowering:: MipsTargetLowering::ConstraintType MipsTargetLowering::
getConstraintType(const std::string &Constraint) const getConstraintType(const std::string &Constraint) const
{ {
// Mips specific constrainy // Mips specific constrainy
// GCC config/mips/constraints.md // GCC config/mips/constraints.md
// //
// 'd' : An address register. Equivalent to r // 'd' : An address register. Equivalent to r
// unless generating MIPS16 code. // unless generating MIPS16 code.
// 'y' : Equivalent to r; retained for // 'y' : Equivalent to r; retained for
// backwards compatibility. // backwards compatibility.
// 'f' : Floating Point registers. // 'f' : Floating Point registers.
if (Constraint.size() == 1) { if (Constraint.size() == 1) {
switch (Constraint[0]) { switch (Constraint[0]) {
default : break; default : break;
case 'd': case 'd':
case 'y': case 'y':
case 'f': case 'f':
return C_RegisterClass; return C_RegisterClass;
break; break;
@ -1287,8 +1287,8 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
default: default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint); weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
break; break;
case 'd': case 'd':
case 'y': case 'y':
if (type->isIntegerTy()) if (type->isIntegerTy())
weight = CW_Register; weight = CW_Register;
break; break;
@ -1313,7 +1313,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
case 'f': case 'f':
if (VT == MVT::f32) if (VT == MVT::f32)
return std::make_pair(0U, Mips::FGR32RegisterClass); return std::make_pair(0U, Mips::FGR32RegisterClass);
if (VT == MVT::f64) if (VT == MVT::f64)
if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit())) if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
return std::make_pair(0U, Mips::AFGR64RegisterClass); return std::make_pair(0U, Mips::AFGR64RegisterClass);
} }
@ -1331,15 +1331,15 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
if (Constraint.size() != 1) if (Constraint.size() != 1)
return std::vector<unsigned>(); return std::vector<unsigned>();
switch (Constraint[0]) { switch (Constraint[0]) {
default : break; default : break;
case 'r': case 'r':
// GCC Mips Constraint Letters // GCC Mips Constraint Letters
case 'd': case 'd':
case 'y': case 'y':
return make_vector<unsigned>(Mips::T0, Mips::T1, Mips::T2, Mips::T3, return make_vector<unsigned>(Mips::T0, Mips::T1, Mips::T2, Mips::T3,
Mips::T4, Mips::T5, Mips::T6, Mips::T7, Mips::S0, Mips::S1, Mips::T4, Mips::T5, Mips::T6, Mips::T7, Mips::S0, Mips::S1,
Mips::S2, Mips::S3, Mips::S4, Mips::S5, Mips::S6, Mips::S7, Mips::S2, Mips::S3, Mips::S4, Mips::S5, Mips::S6, Mips::S7,
Mips::T8, 0); Mips::T8, 0);
case 'f': case 'f':
@ -1351,15 +1351,15 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
Mips::F25, Mips::F26, Mips::F27, Mips::F28, Mips::F29, Mips::F25, Mips::F26, Mips::F27, Mips::F28, Mips::F29,
Mips::F30, Mips::F31, 0); Mips::F30, Mips::F31, 0);
else else
return make_vector<unsigned>(Mips::F2, Mips::F4, Mips::F6, Mips::F8, return make_vector<unsigned>(Mips::F2, Mips::F4, Mips::F6, Mips::F8,
Mips::F10, Mips::F20, Mips::F22, Mips::F24, Mips::F26, Mips::F10, Mips::F20, Mips::F22, Mips::F24, Mips::F26,
Mips::F28, Mips::F30, 0); Mips::F28, Mips::F30, 0);
} }
if (VT == MVT::f64) if (VT == MVT::f64)
if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit())) if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
return make_vector<unsigned>(Mips::D1, Mips::D2, Mips::D3, Mips::D4, return make_vector<unsigned>(Mips::D1, Mips::D2, Mips::D3, Mips::D4,
Mips::D5, Mips::D10, Mips::D11, Mips::D12, Mips::D13, Mips::D5, Mips::D10, Mips::D11, Mips::D12, Mips::D13,
Mips::D14, Mips::D15, 0); Mips::D14, Mips::D15, 0);
} }
return std::vector<unsigned>(); return std::vector<unsigned>();

View File

@ -76,7 +76,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// On PPC32/64, arguments smaller than 4/8 bytes are extended, so all // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
// arguments are at least 4/8 bytes aligned. // arguments are at least 4/8 bytes aligned.
setMinStackArgumentAlignment(TM.getSubtarget<PPCSubtarget>().isPPC64() ? 8:4); setMinStackArgumentAlignment(TM.getSubtarget<PPCSubtarget>().isPPC64() ? 8:4);
// Set up the register classes. // Set up the register classes.
addRegisterClass(MVT::i32, PPC::GPRCRegisterClass); addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
addRegisterClass(MVT::f32, PPC::F4RCRegisterClass); addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
@ -178,10 +178,10 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand); setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand); setOperationAction(ISD::BITCAST, MVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand); setOperationAction(ISD::BITCAST, MVT::i32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand); setOperationAction(ISD::BITCAST, MVT::i64, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand); setOperationAction(ISD::BITCAST, MVT::f64, Expand);
// We cannot sextinreg(i1). Expand to shifts. // We cannot sextinreg(i1). Expand to shifts.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
@ -549,7 +549,7 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes). /// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
bool isUnary) { bool isUnary) {
if (!isUnary) if (!isUnary)
return isVMerge(N, UnitSize, 8, 24); return isVMerge(N, UnitSize, 8, 24);
@ -558,7 +558,7 @@ bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes). /// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize, bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
bool isUnary) { bool isUnary) {
if (!isUnary) if (!isUnary)
return isVMerge(N, UnitSize, 0, 16); return isVMerge(N, UnitSize, 0, 16);
@ -573,7 +573,7 @@ int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
"PPC only supports shuffles by bytes!"); "PPC only supports shuffles by bytes!");
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N); ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
// Find the first non-undef value in the shuffle mask. // Find the first non-undef value in the shuffle mask.
unsigned i; unsigned i;
for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i) for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
@ -611,7 +611,7 @@ bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
// This is a splat operation if each element of the permute is the same, and // This is a splat operation if each element of the permute is the same, and
// if the value doesn't reference the second vector. // if the value doesn't reference the second vector.
unsigned ElementBase = N->getMaskElt(0); unsigned ElementBase = N->getMaskElt(0);
// FIXME: Handle UNDEF elements too! // FIXME: Handle UNDEF elements too!
if (ElementBase >= 16) if (ElementBase >= 16)
return false; return false;
@ -639,7 +639,7 @@ bool PPC::isAllNegativeZeroVector(SDNode *N) {
APInt APVal, APUndef; APInt APVal, APUndef;
unsigned BitSize; unsigned BitSize;
bool HasAnyUndefs; bool HasAnyUndefs;
if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true)) if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
return CFP->getValueAPF().isNegZero(); return CFP->getValueAPF().isNegZero();
@ -1104,10 +1104,10 @@ static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
unsigned &LoOpFlags, const GlobalValue *GV = 0) { unsigned &LoOpFlags, const GlobalValue *GV = 0) {
HiOpFlags = PPCII::MO_HA16; HiOpFlags = PPCII::MO_HA16;
LoOpFlags = PPCII::MO_LO16; LoOpFlags = PPCII::MO_LO16;
// Don't use the pic base if not in PIC relocation model. Or if we are on a // Don't use the pic base if not in PIC relocation model. Or if we are on a
// non-darwin platform. We don't support PIC on other platforms yet. // non-darwin platform. We don't support PIC on other platforms yet.
bool isPIC = TM.getRelocationModel() == Reloc::PIC_ && bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
TM.getSubtarget<PPCSubtarget>().isDarwin(); TM.getSubtarget<PPCSubtarget>().isDarwin();
if (isPIC) { if (isPIC) {
HiOpFlags |= PPCII::MO_PIC_FLAG; HiOpFlags |= PPCII::MO_PIC_FLAG;
@ -1119,13 +1119,13 @@ static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) { if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
HiOpFlags |= PPCII::MO_NLP_FLAG; HiOpFlags |= PPCII::MO_NLP_FLAG;
LoOpFlags |= PPCII::MO_NLP_FLAG; LoOpFlags |= PPCII::MO_NLP_FLAG;
if (GV->hasHiddenVisibility()) { if (GV->hasHiddenVisibility()) {
HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG; LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
} }
} }
return isPIC; return isPIC;
} }
@ -1137,12 +1137,12 @@ static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero); SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero); SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
// With PIC, the first instruction is actually "GR+hi(&G)". // With PIC, the first instruction is actually "GR+hi(&G)".
if (isPIC) if (isPIC)
Hi = DAG.getNode(ISD::ADD, DL, PtrVT, Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi); DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
// Generate non-pic code that has direct accesses to the constant pool. // Generate non-pic code that has direct accesses to the constant pool.
// The address of the global is just (hi(&g)+lo(&g)). // The address of the global is just (hi(&g)+lo(&g)).
return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo); return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
@ -1166,7 +1166,7 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const { SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType(); EVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
unsigned MOHiFlag, MOLoFlag; unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag); SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
@ -1180,7 +1180,7 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
DebugLoc DL = Op.getDebugLoc(); DebugLoc DL = Op.getDebugLoc();
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
unsigned MOHiFlag, MOLoFlag; unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag); bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
SDValue TgtBAHi = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOHiFlag); SDValue TgtBAHi = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOHiFlag);
@ -1210,7 +1210,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag); DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
SDValue GALo = SDValue GALo =
DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag); DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG); SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);
// If the global reference is actually to a non-lazy-pointer, we have to do an // If the global reference is actually to a non-lazy-pointer, we have to do an
@ -1429,7 +1429,7 @@ static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
PPC::R7, PPC::R8, PPC::R9, PPC::R10, PPC::R7, PPC::R8, PPC::R9, PPC::R10,
}; };
const unsigned NumArgRegs = array_lengthof(ArgRegs); const unsigned NumArgRegs = array_lengthof(ArgRegs);
unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
// Skip one register if the first unallocated register has an even register // Skip one register if the first unallocated register has an even register
@ -1439,7 +1439,7 @@ static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
if (RegNum != NumArgRegs && RegNum % 2 == 1) { if (RegNum != NumArgRegs && RegNum % 2 == 1) {
State.AllocateReg(ArgRegs[RegNum]); State.AllocateReg(ArgRegs[RegNum]);
} }
// Always return false here, as this function only makes sure that the first // Always return false here, as this function only makes sure that the first
// unallocated register has an odd register number and does not actually // unallocated register has an odd register number and does not actually
// allocate a register for the current argument. // allocate a register for the current argument.
@ -1457,7 +1457,7 @@ static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
}; };
const unsigned NumArgRegs = array_lengthof(ArgRegs); const unsigned NumArgRegs = array_lengthof(ArgRegs);
unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs); unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);
// If there is only one Floating-point register left we need to put both f64 // If there is only one Floating-point register left we need to put both f64
@ -1465,7 +1465,7 @@ static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) { if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
State.AllocateReg(ArgRegs[RegNum]); State.AllocateReg(ArgRegs[RegNum]);
} }
// Always return false here, as this function only makes sure that the two f64 // Always return false here, as this function only makes sure that the two f64
// values a ppc_fp128 value is split into are both passed in registers or both // values a ppc_fp128 value is split into are both passed in registers or both
// passed on the stack and does not actually allocate a register for the // passed on the stack and does not actually allocate a register for the
@ -1550,7 +1550,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// Specifications: // Specifications:
// System V Application Binary Interface PowerPC Processor Supplement // System V Application Binary Interface PowerPC Processor Supplement
// AltiVec Technology Programming Interface Manual // AltiVec Technology Programming Interface Manual
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo *MFI = MF.getFrameInfo(); MachineFrameInfo *MFI = MF.getFrameInfo();
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
@ -1569,15 +1569,15 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize); CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4); CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4);
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
// Arguments stored in registers. // Arguments stored in registers.
if (VA.isRegLoc()) { if (VA.isRegLoc()) {
TargetRegisterClass *RC; TargetRegisterClass *RC;
EVT ValVT = VA.getValVT(); EVT ValVT = VA.getValVT();
switch (ValVT.getSimpleVT().SimpleTy) { switch (ValVT.getSimpleVT().SimpleTy) {
default: default:
llvm_unreachable("ValVT not supported by formal arguments Lowering"); llvm_unreachable("ValVT not supported by formal arguments Lowering");
@ -1597,7 +1597,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
RC = PPC::VRRCRegisterClass; RC = PPC::VRRCRegisterClass;
break; break;
} }
// Transform the arguments stored in physical registers into virtual ones. // Transform the arguments stored in physical registers into virtual ones.
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC); unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT); SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT);
@ -1633,7 +1633,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
// Area that is at least reserved in the caller of this function. // Area that is at least reserved in the caller of this function.
unsigned MinReservedArea = CCByValInfo.getNextStackOffset(); unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
// Set the size that is at least reserved in caller of this function. Tail // Set the size that is at least reserved in caller of this function. Tail
// call optimized function's reserved stack space needs to be aligned so that // call optimized function's reserved stack space needs to be aligned so that
// taking the difference between two stack areas will result in an aligned // taking the difference between two stack areas will result in an aligned
@ -1643,16 +1643,16 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
MinReservedArea = MinReservedArea =
std::max(MinReservedArea, std::max(MinReservedArea,
PPCFrameInfo::getMinCallFrameSize(false, false)); PPCFrameInfo::getMinCallFrameSize(false, false));
unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()-> unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
getStackAlignment(); getStackAlignment();
unsigned AlignMask = TargetAlign-1; unsigned AlignMask = TargetAlign-1;
MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask; MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
FI->setMinReservedArea(MinReservedArea); FI->setMinReservedArea(MinReservedArea);
SmallVector<SDValue, 8> MemOps; SmallVector<SDValue, 8> MemOps;
// If the function takes variable number of arguments, make a frame index for // If the function takes variable number of arguments, make a frame index for
// the start of the first vararg value... for expansion of llvm.va_start. // the start of the first vararg value... for expansion of llvm.va_start.
if (isVarArg) { if (isVarArg) {
@ -1883,9 +1883,9 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
MemOps.push_back(Store); MemOps.push_back(Store);
++GPR_idx; ++GPR_idx;
} }
ArgOffset += PtrByteSize; ArgOffset += PtrByteSize;
continue; continue;
} }
for (unsigned j = 0; j < ArgSize; j += PtrByteSize) { for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
@ -2064,7 +2064,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
// result of va_next. // result of va_next.
for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) { for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
unsigned VReg; unsigned VReg;
if (isPPC64) if (isPPC64)
VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass); VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
else else
@ -2331,7 +2331,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(), LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
false, false, 0); false, false, 0);
Chain = SDValue(LROpOut.getNode(), 1); Chain = SDValue(LROpOut.getNode(), 1);
// When using the 32/64-bit SVR4 ABI there is no need to load the FP stack // When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
// slot as the FP is never overwritten. // slot as the FP is never overwritten.
if (isDarwinABI) { if (isDarwinABI) {
@ -2421,7 +2421,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys, SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys,
const PPCSubtarget &PPCSubTarget) { const PPCSubtarget &PPCSubTarget) {
bool isPPC64 = PPCSubTarget.isPPC64(); bool isPPC64 = PPCSubTarget.isPPC64();
bool isSVR4ABI = PPCSubTarget.isSVR4ABI(); bool isSVR4ABI = PPCSubTarget.isSVR4ABI();
@ -2437,7 +2437,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
Callee = SDValue(Dest, 0); Callee = SDValue(Dest, 0);
needIndirectCall = false; needIndirectCall = false;
} }
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) { if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
// XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201 // XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201
// Use indirect calls for ALL functions calls in JIT mode, since the // Use indirect calls for ALL functions calls in JIT mode, since the
@ -2453,7 +2453,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// automatically synthesizes these stubs. // automatically synthesizes these stubs.
OpFlags = PPCII::MO_DARWIN_STUB; OpFlags = PPCII::MO_DARWIN_STUB;
} }
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, // If the callee is a GlobalAddress/ExternalSymbol node (quite common,
// every direct call is) turn it into a TargetGlobalAddress / // every direct call is) turn it into a TargetGlobalAddress /
// TargetExternalSymbol node so that legalize doesn't hack it. // TargetExternalSymbol node so that legalize doesn't hack it.
@ -2461,12 +2461,12 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
Callee.getValueType(), Callee.getValueType(),
0, OpFlags); 0, OpFlags);
needIndirectCall = false; needIndirectCall = false;
} }
} }
if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
unsigned char OpFlags = 0; unsigned char OpFlags = 0;
if (DAG.getTarget().getRelocationModel() != Reloc::Static && if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
PPCSubTarget.getDarwinVers() < 9) { PPCSubTarget.getDarwinVers() < 9) {
// PC-relative references to external symbols should go through $stub, // PC-relative references to external symbols should go through $stub,
@ -2474,12 +2474,12 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
// automatically synthesizes these stubs. // automatically synthesizes these stubs.
OpFlags = PPCII::MO_DARWIN_STUB; OpFlags = PPCII::MO_DARWIN_STUB;
} }
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(), Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
OpFlags); OpFlags);
needIndirectCall = false; needIndirectCall = false;
} }
if (needIndirectCall) { if (needIndirectCall) {
// Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair // Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
// to do the call, we can't use PPCISD::CALL. // to do the call, we can't use PPCISD::CALL.
@ -2750,7 +2750,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// in this function's (MF) stack pointer stack slot 0(SP). // in this function's (MF) stack pointer stack slot 0(SP).
if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast) if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast)
MF.getInfo<PPCFunctionInfo>()->setHasFastCall(); MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
// Count how many bytes are to be pushed on the stack, including the linkage // Count how many bytes are to be pushed on the stack, including the linkage
// area, parameter list area and the part of the local variable space which // area, parameter list area and the part of the local variable space which
// contains copies of aggregates which are passed by value. // contains copies of aggregates which are passed by value.
@ -2768,12 +2768,12 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// Fixed vector arguments go into registers as long as registers are // Fixed vector arguments go into registers as long as registers are
// available. Variable vector arguments always go into memory. // available. Variable vector arguments always go into memory.
unsigned NumArgs = Outs.size(); unsigned NumArgs = Outs.size();
for (unsigned i = 0; i != NumArgs; ++i) { for (unsigned i = 0; i != NumArgs; ++i) {
MVT ArgVT = Outs[i].VT; MVT ArgVT = Outs[i].VT;
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
bool Result; bool Result;
if (Outs[i].IsFixed) { if (Outs[i].IsFixed) {
Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
CCInfo); CCInfo);
@ -2781,7 +2781,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
ArgFlags, CCInfo); ArgFlags, CCInfo);
} }
if (Result) { if (Result) {
#ifndef NDEBUG #ifndef NDEBUG
errs() << "Call operand #" << i << " has unhandled type " errs() << "Call operand #" << i << " has unhandled type "
@ -2794,7 +2794,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// All arguments are treated the same. // All arguments are treated the same.
CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4); CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4);
} }
// Assign locations to all of the outgoing aggregate by value arguments. // Assign locations to all of the outgoing aggregate by value arguments.
SmallVector<CCValAssign, 16> ByValArgLocs; SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), ByValArgLocs, CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), ByValArgLocs,
@ -2809,7 +2809,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// space variable where copies of aggregates which are passed by value are // space variable where copies of aggregates which are passed by value are
// stored. // stored.
unsigned NumBytes = CCByValInfo.getNextStackOffset(); unsigned NumBytes = CCByValInfo.getNextStackOffset();
// Calculate by how many bytes the stack has to be adjusted in case of tail // Calculate by how many bytes the stack has to be adjusted in case of tail
// call optimization. // call optimization.
int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes); int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
@ -2829,7 +2829,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
// arguments that may not fit in the registers available for argument // arguments that may not fit in the registers available for argument
// passing. // passing.
SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32); SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass; SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
SmallVector<TailCallArgumentInfo, 8> TailCallArguments; SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
SmallVector<SDValue, 8> MemOpChains; SmallVector<SDValue, 8> MemOpChains;
@ -2841,7 +2841,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
SDValue Arg = OutVals[i]; SDValue Arg = OutVals[i];
ISD::ArgFlagsTy Flags = Outs[i].Flags; ISD::ArgFlagsTy Flags = Outs[i].Flags;
if (Flags.isByVal()) { if (Flags.isByVal()) {
// Argument is an aggregate which is passed by value, thus we need to // Argument is an aggregate which is passed by value, thus we need to
// create a copy of it in the local variable space of the current stack // create a copy of it in the local variable space of the current stack
@ -2850,33 +2850,33 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
assert((j < ByValArgLocs.size()) && "Index out of bounds!"); assert((j < ByValArgLocs.size()) && "Index out of bounds!");
CCValAssign &ByValVA = ByValArgLocs[j++]; CCValAssign &ByValVA = ByValArgLocs[j++];
assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!"); assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
// Memory reserved in the local variable space of the callers stack frame. // Memory reserved in the local variable space of the callers stack frame.
unsigned LocMemOffset = ByValVA.getLocMemOffset(); unsigned LocMemOffset = ByValVA.getLocMemOffset();
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset); SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff); PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
// Create a copy of the argument in the local area of the current // Create a copy of the argument in the local area of the current
// stack frame. // stack frame.
SDValue MemcpyCall = SDValue MemcpyCall =
CreateCopyOfByValArgument(Arg, PtrOff, CreateCopyOfByValArgument(Arg, PtrOff,
CallSeqStart.getNode()->getOperand(0), CallSeqStart.getNode()->getOperand(0),
Flags, DAG, dl); Flags, DAG, dl);
// This must go outside the CALLSEQ_START..END. // This must go outside the CALLSEQ_START..END.
SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
CallSeqStart.getNode()->getOperand(1)); CallSeqStart.getNode()->getOperand(1));
DAG.ReplaceAllUsesWith(CallSeqStart.getNode(), DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
NewCallSeqStart.getNode()); NewCallSeqStart.getNode());
Chain = CallSeqStart = NewCallSeqStart; Chain = CallSeqStart = NewCallSeqStart;
// Pass the address of the aggregate copy on the stack either in a // Pass the address of the aggregate copy on the stack either in a
// physical register or in the parameter list area of the current stack // physical register or in the parameter list area of the current stack
// frame to the callee. // frame to the callee.
Arg = PtrOff; Arg = PtrOff;
} }
if (VA.isRegLoc()) { if (VA.isRegLoc()) {
// Put argument in a physical register. // Put argument in a physical register.
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
@ -2899,11 +2899,11 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
} }
} }
} }
if (!MemOpChains.empty()) if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size()); &MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token chain // Build a sequence of copy-to-reg nodes chained together with token chain
// and flag operands which copy the outgoing args into the appropriate regs. // and flag operands which copy the outgoing args into the appropriate regs.
SDValue InFlag; SDValue InFlag;
@ -2912,7 +2912,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
RegsToPass[i].second, InFlag); RegsToPass[i].second, InFlag);
InFlag = Chain.getValue(1); InFlag = Chain.getValue(1);
} }
// Set CR6 to true if this is a vararg call. // Set CR6 to true if this is a vararg call.
if (isVarArg) { if (isVarArg) {
SDValue SetCR(DAG.getMachineNode(PPC::CRSET, dl, MVT::i32), 0); SDValue SetCR(DAG.getMachineNode(PPC::CRSET, dl, MVT::i32), 0);
@ -3187,7 +3187,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
MachinePointerInfo(), false, false, 0); MachinePointerInfo(), false, false, 0);
MemOpChains.push_back(Store); MemOpChains.push_back(Store);
if (VR_idx != NumVRs) { if (VR_idx != NumVRs) {
SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
MachinePointerInfo(), MachinePointerInfo(),
false, false, 0); false, false, 0);
MemOpChains.push_back(Load.getValue(1)); MemOpChains.push_back(Load.getValue(1));
@ -3272,7 +3272,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
// On Darwin, R12 must contain the address of an indirect callee. This does // On Darwin, R12 must contain the address of an indirect callee. This does
// not mean the MTCTR instruction must use R12; it's easier to model this as // not mean the MTCTR instruction must use R12; it's easier to model this as
// an extra parameter, so do that. // an extra parameter, so do that.
if (!isTailCall && if (!isTailCall &&
!dyn_cast<GlobalAddressSDNode>(Callee) && !dyn_cast<GlobalAddressSDNode>(Callee) &&
!dyn_cast<ExternalSymbolSDNode>(Callee) && !dyn_cast<ExternalSymbolSDNode>(Callee) &&
!isBLACompatibleAddress(Callee, DAG)) !isBLACompatibleAddress(Callee, DAG))
@ -3523,7 +3523,7 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!"); default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
case MVT::i32: case MVT::i32:
Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ : Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
PPCISD::FCTIDZ, PPCISD::FCTIDZ,
dl, MVT::f64, Src); dl, MVT::f64, Src);
break; break;
case MVT::i64: case MVT::i64:
@ -3555,8 +3555,7 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
return SDValue(); return SDValue();
if (Op.getOperand(0).getValueType() == MVT::i64) { if (Op.getOperand(0).getValueType() == MVT::i64) {
SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl, SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
MVT::f64, Op.getOperand(0));
SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits); SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
if (Op.getValueType() == MVT::f32) if (Op.getValueType() == MVT::f32)
FP = DAG.getNode(ISD::FP_ROUND, dl, FP = DAG.getNode(ISD::FP_ROUND, dl,
@ -3777,7 +3776,7 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
Ops.assign(CanonicalVT.getVectorNumElements(), Elt); Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT, SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
&Ops[0], Ops.size()); &Ops[0], Ops.size());
return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res); return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
} }
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
@ -3806,14 +3805,14 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
EVT VT, SelectionDAG &DAG, DebugLoc dl) { EVT VT, SelectionDAG &DAG, DebugLoc dl) {
// Force LHS/RHS to be the right type. // Force LHS/RHS to be the right type.
LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS); LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS); RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
int Ops[16]; int Ops[16];
for (unsigned i = 0; i != 16; ++i) for (unsigned i = 0; i != 16; ++i)
Ops[i] = i + Amt; Ops[i] = i + Amt;
SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops); SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T); return DAG.getNode(ISD::BITCAST, dl, VT, T);
} }
// If this is a case we can't handle, return null and let the default // If this is a case we can't handle, return null and let the default
@ -3847,7 +3846,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) { if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
SDValue Z = DAG.getConstant(0, MVT::i32); SDValue Z = DAG.getConstant(0, MVT::i32);
Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z); Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z); Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
} }
return Op; return Op;
} }
@ -3866,7 +3865,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) { if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl); SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl);
Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res); Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res);
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
} }
// If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is // If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
@ -3882,7 +3881,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
// xor by OnesV to invert it. // xor by OnesV to invert it.
Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV); Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
} }
// Check to see if this is a wide variety of vsplti*, binop self cases. // Check to see if this is a wide variety of vsplti*, binop self cases.
@ -3908,7 +3907,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
Intrinsic::ppc_altivec_vslw Intrinsic::ppc_altivec_vslw
}; };
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
} }
// vsplti + srl self. // vsplti + srl self.
@ -3919,7 +3918,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
Intrinsic::ppc_altivec_vsrw Intrinsic::ppc_altivec_vsrw
}; };
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
} }
// vsplti + sra self. // vsplti + sra self.
@ -3930,7 +3929,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
Intrinsic::ppc_altivec_vsraw Intrinsic::ppc_altivec_vsraw
}; };
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
} }
// vsplti + rol self. // vsplti + rol self.
@ -3942,7 +3941,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
Intrinsic::ppc_altivec_vrlw Intrinsic::ppc_altivec_vrlw
}; };
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl); Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
} }
// t = vsplti c, result = vsldoi t, t, 1 // t = vsplti c, result = vsldoi t, t, 1
@ -3969,14 +3968,14 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl); SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS); LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS);
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
} }
// Odd, in range [-31,-17]: (vsplti C)+(vsplti -16). // Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
if (SextVal >= -31 && SextVal <= 0) { if (SextVal >= -31 && SextVal <= 0) {
SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl); SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl);
SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl); SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS); LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS);
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
} }
return SDValue(); return SDValue();
@ -4053,10 +4052,10 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl); return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
} }
EVT VT = OpLHS.getValueType(); EVT VT = OpLHS.getValueType();
OpLHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpLHS); OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
OpRHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpRHS); OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs); SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T); return DAG.getNode(ISD::BITCAST, dl, VT, T);
} }
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
@ -4109,7 +4108,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
// perfect shuffle table to emit an optimal matching sequence. // perfect shuffle table to emit an optimal matching sequence.
SmallVector<int, 16> PermMask; SmallVector<int, 16> PermMask;
SVOp->getMask(PermMask); SVOp->getMask(PermMask);
unsigned PFIndexes[4]; unsigned PFIndexes[4];
bool isFourElementShuffle = true; bool isFourElementShuffle = true;
for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
@ -4244,7 +4243,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(), SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
Op.getOperand(1), Op.getOperand(2), Op.getOperand(1), Op.getOperand(2),
DAG.getConstant(CompareOpc, MVT::i32)); DAG.getConstant(CompareOpc, MVT::i32));
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
} }
// Create the PPCISD altivec 'dot' comparison node. // Create the PPCISD altivec 'dot' comparison node.
@ -4327,9 +4326,9 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl); BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
// Shrinkify inputs to v8i16. // Shrinkify inputs to v8i16.
LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS); LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS); RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap); RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
// Low parts multiplied together, generating 32-bit results (we ignore the // Low parts multiplied together, generating 32-bit results (we ignore the
// top parts). // top parts).
@ -4355,12 +4354,12 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
// Multiply the even 8-bit parts, producing 16-bit sums. // Multiply the even 8-bit parts, producing 16-bit sums.
SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub, SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
LHS, RHS, DAG, dl, MVT::v8i16); LHS, RHS, DAG, dl, MVT::v8i16);
EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts); EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
// Multiply the odd 8-bit parts, producing 16-bit sums. // Multiply the odd 8-bit parts, producing 16-bit sums.
SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub, SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
LHS, RHS, DAG, dl, MVT::v8i16); LHS, RHS, DAG, dl, MVT::v8i16);
OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts); OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
// Merge the results together. // Merge the results together.
int Ops[16]; int Ops[16];
@ -5568,7 +5567,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
if (Depth > 0) { if (Depth > 0) {
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG); SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
SDValue Offset = SDValue Offset =
DAG.getConstant(PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI), DAG.getConstant(PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI),
isPPC64? MVT::i64 : MVT::i32); isPPC64? MVT::i64 : MVT::i32);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),

View File

@ -285,8 +285,8 @@ including having this work sanely.
Fix Darwin FP-In-Integer Registers ABI Fix Darwin FP-In-Integer Registers ABI
Darwin passes doubles in structures in integer registers, which is very very Darwin passes doubles in structures in integer registers, which is very very
bad. Add something like a BIT_CONVERT to LLVM, then do an i-p transformation bad. Add something like a BITCAST to LLVM, then do an i-p transformation that
that percolates these things out of functions. percolates these things out of functions.
Check out how horrible this is: Check out how horrible this is:
http://gcc.gnu.org/ml/gcc/2005-10/msg01036.html http://gcc.gnu.org/ml/gcc/2005-10/msg01036.html

View File

@ -66,7 +66,7 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i]; CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!"); assert(VA.isRegLoc() && "Can only return in registers!");
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
OutVals[i], Flag); OutVals[i], Flag);
// Guarantee that all emitted copies are stuck together with flags. // Guarantee that all emitted copies are stuck together with flags.
@ -166,7 +166,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
MF.getRegInfo().addLiveIn(*CurArgReg++, VReg); MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32); SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg); Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
InVals.push_back(Arg); InVals.push_back(Arg);
} else { } else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset, int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
@ -219,7 +219,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
// If we want a double, do a bit convert. // If we want a double, do a bit convert.
if (ObjectVT == MVT::f64) if (ObjectVT == MVT::f64)
WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue); WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
InVals.push_back(WholeValue); InVals.push_back(WholeValue);
} }
@ -383,7 +383,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
ValToStore = Val; ValToStore = Val;
} else { } else {
// Convert this to a FP value in an int reg. // Convert this to a FP value in an int reg.
Val = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Val); Val = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Val);
RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val)); RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
} }
break; break;
@ -397,7 +397,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Break into top and bottom parts by storing to the stack and loading // Break into top and bottom parts by storing to the stack and loading
// out the parts as integers. Top part goes in a reg. // out the parts as integers. Top part goes in a reg.
SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32); SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
Val, StackPtr, MachinePointerInfo(), Val, StackPtr, MachinePointerInfo(),
false, false, 0); false, false, 0);
// Sparc is big-endian, so the high part comes first. // Sparc is big-endian, so the high part comes first.
@ -450,7 +450,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32); SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getConstant(ArgOffset, MVT::i32); SDValue PtrOff = DAG.getConstant(ArgOffset, MVT::i32);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff); PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, ValToStore, MemOpChains.push_back(DAG.getStore(Chain, dl, ValToStore,
PtrOff, MachinePointerInfo(), PtrOff, MachinePointerInfo(),
false, false, 0)); false, false, 0));
} }
@ -612,8 +612,8 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand); setOperationAction(ISD::BITCAST, MVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand); setOperationAction(ISD::BITCAST, MVT::i32, Expand);
// Sparc has no select or setcc: expand to SELECT_CC. // Sparc has no select or setcc: expand to SELECT_CC.
setOperationAction(ISD::SELECT, MVT::i32, Expand); setOperationAction(ISD::SELECT, MVT::i32, Expand);
@ -758,7 +758,7 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
} }
} }
SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op, SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const { SelectionDAG &DAG) const {
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
// FIXME there isn't really any debug info here // FIXME there isn't really any debug info here
@ -767,15 +767,15 @@ SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA); SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA); SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA);
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi); return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl, SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
getPointerTy()); getPointerTy());
SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi); SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
GlobalBase, RelAddr); GlobalBase, RelAddr);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
AbsAddr, MachinePointerInfo(), false, false, 0); AbsAddr, MachinePointerInfo(), false, false, 0);
} }
@ -788,15 +788,15 @@ SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment()); SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP); SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP); SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP);
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi); return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl, SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
getPointerTy()); getPointerTy());
SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi); SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
GlobalBase, RelAddr); GlobalBase, RelAddr);
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
AbsAddr, MachinePointerInfo(), false, false, 0); AbsAddr, MachinePointerInfo(), false, false, 0);
} }
@ -805,13 +805,13 @@ static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
// Convert the fp value to integer in an FP register. // Convert the fp value to integer in an FP register.
assert(Op.getValueType() == MVT::i32); assert(Op.getValueType() == MVT::i32);
Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0)); Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op); return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
} }
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) { static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc(); DebugLoc dl = Op.getDebugLoc();
assert(Op.getOperand(0).getValueType() == MVT::i32); assert(Op.getOperand(0).getValueType() == MVT::i32);
SDValue Tmp = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0)); SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
// Convert the int value to FP in an FP register. // Convert the int value to FP in an FP register.
return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp); return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
} }
@ -925,7 +925,7 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
// Bit-Convert the value to f64. // Bit-Convert the value to f64.
SDValue Ops[2] = { SDValue Ops[2] = {
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, V), DAG.getNode(ISD::BITCAST, dl, MVT::f64, V),
V.getValue(1) V.getValue(1)
}; };
return DAG.getMergeValues(Ops, 2, dl); return DAG.getMergeValues(Ops, 2, dl);

View File

@ -147,8 +147,8 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
setOperationAction(ISD::FREM, MVT::f64, Expand); setOperationAction(ISD::FREM, MVT::f64, Expand);
// We have only 64-bit bitconverts // We have only 64-bit bitconverts
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand); setOperationAction(ISD::BITCAST, MVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand); setOperationAction(ISD::BITCAST, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);

View File

@ -36,7 +36,7 @@
using namespace llvm; using namespace llvm;
namespace { namespace {
class X86FastISel : public FastISel { class X86FastISel : public FastISel {
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets. /// make the right decision when generating code for different targets.
@ -46,7 +46,7 @@ class X86FastISel : public FastISel {
/// ///
unsigned StackPtr; unsigned StackPtr;
/// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87 /// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
/// floating point ops. /// floating point ops.
/// When SSE is available, use it for f32 operations. /// When SSE is available, use it for f32 operations.
/// When SSE2 is available, use it for f64 operations. /// When SSE2 is available, use it for f64 operations.
@ -69,12 +69,12 @@ public:
/// possible. /// possible.
virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo, virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
const LoadInst *LI); const LoadInst *LI);
#include "X86GenFastISel.inc" #include "X86GenFastISel.inc"
private: private:
bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT); bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR); bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
bool X86FastEmitStore(EVT VT, const Value *Val, bool X86FastEmitStore(EVT VT, const Value *Val,
@ -84,12 +84,12 @@ private:
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT, bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
unsigned &ResultReg); unsigned &ResultReg);
bool X86SelectAddress(const Value *V, X86AddressMode &AM); bool X86SelectAddress(const Value *V, X86AddressMode &AM);
bool X86SelectCallAddress(const Value *V, X86AddressMode &AM); bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
bool X86SelectLoad(const Instruction *I); bool X86SelectLoad(const Instruction *I);
bool X86SelectStore(const Instruction *I); bool X86SelectStore(const Instruction *I);
bool X86SelectRet(const Instruction *I); bool X86SelectRet(const Instruction *I);
@ -105,7 +105,7 @@ private:
bool X86SelectSelect(const Instruction *I); bool X86SelectSelect(const Instruction *I);
bool X86SelectTrunc(const Instruction *I); bool X86SelectTrunc(const Instruction *I);
bool X86SelectFPExt(const Instruction *I); bool X86SelectFPExt(const Instruction *I);
bool X86SelectFPTrunc(const Instruction *I); bool X86SelectFPTrunc(const Instruction *I);
@ -134,7 +134,7 @@ private:
bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false); bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
}; };
} // end anonymous namespace. } // end anonymous namespace.
bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) { bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
@ -250,7 +250,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m; Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
break; break;
} }
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(Opc)), AM).addReg(Val); DL, TII.get(Opc)), AM).addReg(Val);
return true; return true;
@ -261,7 +261,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
// Handle 'null' like i32/i64 0. // Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Val)) if (isa<ConstantPointerNull>(Val))
Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext())); Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
// If this is a store of a simple constant, fold the constant into the store. // If this is a store of a simple constant, fold the constant into the store.
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) { if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
unsigned Opc = 0; unsigned Opc = 0;
@ -278,7 +278,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
Opc = X86::MOV64mi32; Opc = X86::MOV64mi32;
break; break;
} }
if (Opc) { if (Opc) {
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
DL, TII.get(Opc)), AM) DL, TII.get(Opc)), AM)
@ -287,11 +287,11 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
return true; return true;
} }
} }
unsigned ValReg = getRegForValue(Val); unsigned ValReg = getRegForValue(Val);
if (ValReg == 0) if (ValReg == 0)
return false; return false;
return X86FastEmitStore(VT, ValReg, AM); return X86FastEmitStore(VT, ValReg, AM);
} }
@ -303,7 +303,7 @@ bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
unsigned &ResultReg) { unsigned &ResultReg) {
unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
Src, /*TODO: Kill=*/false); Src, /*TODO: Kill=*/false);
if (RR != 0) { if (RR != 0) {
ResultReg = RR; ResultReg = RR;
return true; return true;
@ -438,7 +438,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
AM.Disp = (uint32_t)Disp; AM.Disp = (uint32_t)Disp;
if (X86SelectAddress(U->getOperand(0), AM)) if (X86SelectAddress(U->getOperand(0), AM))
return true; return true;
// If we couldn't merge the sub value into this addr mode, revert back to // If we couldn't merge the sub value into this addr mode, revert back to
// our address and just match the value instead of completely failing. // our address and just match the value instead of completely failing.
AM = SavedAM; AM = SavedAM;
@ -467,7 +467,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// Okay, we've committed to selecting this global. Set up the basic address. // Okay, we've committed to selecting this global. Set up the basic address.
AM.GV = GV; AM.GV = GV;
// Allow the subtarget to classify the global. // Allow the subtarget to classify the global.
unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM); unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
@ -476,7 +476,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// FIXME: How do we know Base.Reg is free?? // FIXME: How do we know Base.Reg is free??
AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
} }
// Unless the ABI requires an extra load, return a direct reference to // Unless the ABI requires an extra load, return a direct reference to
// the global. // the global.
if (!isGlobalStubReference(GVFlags)) { if (!isGlobalStubReference(GVFlags)) {
@ -489,7 +489,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
AM.GVOpFlags = GVFlags; AM.GVOpFlags = GVFlags;
return true; return true;
} }
// Ok, we need to do a load from a stub. If we've already loaded from this // Ok, we need to do a load from a stub. If we've already loaded from this
// stub, reuse the loaded pointer, otherwise emit the load now. // stub, reuse the loaded pointer, otherwise emit the load now.
DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V); DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
@ -511,14 +511,14 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
if (TLI.getPointerTy() == MVT::i64) { if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm; Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass; RC = X86::GR64RegisterClass;
if (Subtarget->isPICStyleRIPRel()) if (Subtarget->isPICStyleRIPRel())
StubAM.Base.Reg = X86::RIP; StubAM.Base.Reg = X86::RIP;
} else { } else {
Opc = X86::MOV32rm; Opc = X86::MOV32rm;
RC = X86::GR32RegisterClass; RC = X86::GR32RegisterClass;
} }
LoadReg = createResultReg(RC); LoadReg = createResultReg(RC);
MachineInstrBuilder LoadMI = MachineInstrBuilder LoadMI =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
@ -530,7 +530,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
// Prevent loading GV stub multiple times in same MBB. // Prevent loading GV stub multiple times in same MBB.
LocalValueMap[V] = LoadReg; LocalValueMap[V] = LoadReg;
} }
// Now construct the final address. Note that the Disp, Scale, // Now construct the final address. Note that the Disp, Scale,
// and Index values may already be set here. // and Index values may already be set here.
AM.Base.Reg = LoadReg; AM.Base.Reg = LoadReg;
@ -604,7 +604,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
// Okay, we've committed to selecting this global. Set up the basic address. // Okay, we've committed to selecting this global. Set up the basic address.
AM.GV = GV; AM.GV = GV;
// No ABI requires an extra load for anything other than DLLImport, which // No ABI requires an extra load for anything other than DLLImport, which
// we rejected above. Return a direct reference to the global. // we rejected above. Return a direct reference to the global.
if (Subtarget->isPICStyleRIPRel()) { if (Subtarget->isPICStyleRIPRel()) {
@ -617,7 +617,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
} else if (Subtarget->isPICStyleGOT()) { } else if (Subtarget->isPICStyleGOT()) {
AM.GVOpFlags = X86II::MO_GOTOFF; AM.GVOpFlags = X86II::MO_GOTOFF;
} }
return true; return true;
} }
@ -702,7 +702,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
return false; return false;
CCValAssign &VA = ValLocs[0]; CCValAssign &VA = ValLocs[0];
// Don't bother handling odd stuff for now. // Don't bother handling odd stuff for now.
if (VA.getLocInfo() != CCValAssign::Full) if (VA.getLocInfo() != CCValAssign::Full)
return false; return false;
@ -792,11 +792,11 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
EVT VT) { EVT VT) {
unsigned Op0Reg = getRegForValue(Op0); unsigned Op0Reg = getRegForValue(Op0);
if (Op0Reg == 0) return false; if (Op0Reg == 0) return false;
// Handle 'null' like i32/i64 0. // Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Op1)) if (isa<ConstantPointerNull>(Op1))
Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext())); Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
// We have two options: compare with register or immediate. If the RHS of // We have two options: compare with register or immediate. If the RHS of
// the compare is an immediate that we can fold into this compare, use // the compare is an immediate that we can fold into this compare, use
// CMPri, otherwise use CMPrr. // CMPri, otherwise use CMPrr.
@ -808,16 +808,16 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
return true; return true;
} }
} }
unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget); unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
if (CompareOpc == 0) return false; if (CompareOpc == 0) return false;
unsigned Op1Reg = getRegForValue(Op1); unsigned Op1Reg = getRegForValue(Op1);
if (Op1Reg == 0) return false; if (Op1Reg == 0) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc)) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
.addReg(Op0Reg) .addReg(Op0Reg)
.addReg(Op1Reg); .addReg(Op1Reg);
return true; return true;
} }
@ -835,13 +835,13 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
case CmpInst::FCMP_OEQ: { case CmpInst::FCMP_OEQ: {
if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT)) if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
return false; return false;
unsigned EReg = createResultReg(&X86::GR8RegClass); unsigned EReg = createResultReg(&X86::GR8RegClass);
unsigned NPReg = createResultReg(&X86::GR8RegClass); unsigned NPReg = createResultReg(&X86::GR8RegClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::SETNPr), NPReg); TII.get(X86::SETNPr), NPReg);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg); TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
UpdateValueMap(I, ResultReg); UpdateValueMap(I, ResultReg);
return true; return true;
@ -874,7 +874,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
case CmpInst::FCMP_UGE: SwapArgs = true; SetCCOpc = X86::SETBEr; break; case CmpInst::FCMP_UGE: SwapArgs = true; SetCCOpc = X86::SETBEr; break;
case CmpInst::FCMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr; break; case CmpInst::FCMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr; break;
case CmpInst::FCMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break; case CmpInst::FCMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;
case CmpInst::ICMP_EQ: SwapArgs = false; SetCCOpc = X86::SETEr; break; case CmpInst::ICMP_EQ: SwapArgs = false; SetCCOpc = X86::SETEr; break;
case CmpInst::ICMP_NE: SwapArgs = false; SetCCOpc = X86::SETNEr; break; case CmpInst::ICMP_NE: SwapArgs = false; SetCCOpc = X86::SETNEr; break;
case CmpInst::ICMP_UGT: SwapArgs = false; SetCCOpc = X86::SETAr; break; case CmpInst::ICMP_UGT: SwapArgs = false; SetCCOpc = X86::SETAr; break;
@ -896,7 +896,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
// Emit a compare of Op0/Op1. // Emit a compare of Op0/Op1.
if (!X86FastEmitCompare(Op0, Op1, VT)) if (!X86FastEmitCompare(Op0, Op1, VT))
return false; return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
UpdateValueMap(I, ResultReg); UpdateValueMap(I, ResultReg);
return true; return true;
@ -961,7 +961,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
case CmpInst::FCMP_UGE: SwapArgs = true; BranchOpc = X86::JBE_4; break; case CmpInst::FCMP_UGE: SwapArgs = true; BranchOpc = X86::JBE_4; break;
case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break; case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break;
case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break; case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break;
case CmpInst::ICMP_EQ: SwapArgs = false; BranchOpc = X86::JE_4; break; case CmpInst::ICMP_EQ: SwapArgs = false; BranchOpc = X86::JE_4; break;
case CmpInst::ICMP_NE: SwapArgs = false; BranchOpc = X86::JNE_4; break; case CmpInst::ICMP_NE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA_4; break; case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA_4; break;
@ -975,7 +975,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
default: default:
return false; return false;
} }
const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1); const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
if (SwapArgs) if (SwapArgs)
std::swap(Op0, Op1); std::swap(Op0, Op1);
@ -983,7 +983,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
// Emit a compare of the LHS and RHS, setting the flags. // Emit a compare of the LHS and RHS, setting the flags.
if (!X86FastEmitCompare(Op0, Op1, VT)) if (!X86FastEmitCompare(Op0, Op1, VT))
return false; return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc)) BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
.addMBB(TrueMBB); .addMBB(TrueMBB);
@ -1119,16 +1119,16 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
unsigned Op0Reg = getRegForValue(I->getOperand(0)); unsigned Op0Reg = getRegForValue(I->getOperand(0));
if (Op0Reg == 0) return false; if (Op0Reg == 0) return false;
// Fold immediate in shl(x,3). // Fold immediate in shl(x,3).
if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) { if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
unsigned ResultReg = createResultReg(RC); unsigned ResultReg = createResultReg(RC);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm), BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff); ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
UpdateValueMap(I, ResultReg); UpdateValueMap(I, ResultReg);
return true; return true;
} }
unsigned Op1Reg = getRegForValue(I->getOperand(1)); unsigned Op1Reg = getRegForValue(I->getOperand(1));
if (Op1Reg == 0) return false; if (Op1Reg == 0) return false;
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
@ -1152,10 +1152,10 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
MVT VT; MVT VT;
if (!isTypeLegal(I->getType(), VT)) if (!isTypeLegal(I->getType(), VT))
return false; return false;
// We only use cmov here, if we don't have a cmov instruction bail. // We only use cmov here, if we don't have a cmov instruction bail.
if (!Subtarget->hasCMov()) return false; if (!Subtarget->hasCMov()) return false;
unsigned Opc = 0; unsigned Opc = 0;
const TargetRegisterClass *RC = NULL; const TargetRegisterClass *RC = NULL;
if (VT == MVT::i16) { if (VT == MVT::i16) {
@ -1168,7 +1168,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
Opc = X86::CMOVE64rr; Opc = X86::CMOVE64rr;
RC = &X86::GR64RegClass; RC = &X86::GR64RegClass;
} else { } else {
return false; return false;
} }
unsigned Op0Reg = getRegForValue(I->getOperand(0)); unsigned Op0Reg = getRegForValue(I->getOperand(0));
@ -1233,7 +1233,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
return false; return false;
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType()); EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType()); EVT DstVT = TLI.getValueType(I->getType());
// This code only handles truncation to byte right now. // This code only handles truncation to byte right now.
if (DstVT != MVT::i8 && DstVT != MVT::i1) if (DstVT != MVT::i8 && DstVT != MVT::i1)
// All other cases should be handled by the tblgen generated code. // All other cases should be handled by the tblgen generated code.
@ -1304,21 +1304,21 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
// Grab the frame index. // Grab the frame index.
X86AddressMode AM; X86AddressMode AM;
if (!X86SelectAddress(Slot, AM)) return false; if (!X86SelectAddress(Slot, AM)) return false;
if (!X86FastEmitStore(PtrTy, Op1, AM)) return false; if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
return true; return true;
} }
case Intrinsic::objectsize: { case Intrinsic::objectsize: {
ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1)); ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
const Type *Ty = I.getCalledFunction()->getReturnType(); const Type *Ty = I.getCalledFunction()->getReturnType();
assert(CI && "Non-constant type in Intrinsic::objectsize?"); assert(CI && "Non-constant type in Intrinsic::objectsize?");
MVT VT; MVT VT;
if (!isTypeLegal(Ty, VT)) if (!isTypeLegal(Ty, VT))
return false; return false;
unsigned OpC = 0; unsigned OpC = 0;
if (VT == MVT::i32) if (VT == MVT::i32)
OpC = X86::MOV32ri; OpC = X86::MOV32ri;
@ -1326,7 +1326,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
OpC = X86::MOV64ri; OpC = X86::MOV64ri;
else else
return false; return false;
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT)); unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg). BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
addImm(CI->isZero() ? -1ULL : 0); addImm(CI->isZero() ? -1ULL : 0);
@ -1398,7 +1398,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
ResultReg = DestReg1+1; ResultReg = DestReg1+1;
else else
ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8)); ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
unsigned Opc = X86::SETBr; unsigned Opc = X86::SETBr;
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow) if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
Opc = X86::SETOr; Opc = X86::SETOr;
@ -1516,10 +1516,10 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// Analyze operands of the call, assigning locations to each operand. // Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs; SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext()); CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext());
// Allocate shadow area for Win64 // Allocate shadow area for Win64
if (Subtarget->isTargetWin64()) { if (Subtarget->isTargetWin64()) {
CCInfo.AllocateStack(32, 8); CCInfo.AllocateStack(32, 8);
} }
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86); CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);
@ -1539,7 +1539,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
CCValAssign &VA = ArgLocs[i]; CCValAssign &VA = ArgLocs[i];
unsigned Arg = Args[VA.getValNo()]; unsigned Arg = Args[VA.getValNo()];
EVT ArgVT = ArgVTs[VA.getValNo()]; EVT ArgVT = ArgVTs[VA.getValNo()];
// Promote the value if needed. // Promote the value if needed.
switch (VA.getLocInfo()) { switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!"); default: llvm_unreachable("Unknown loc info!");
@ -1572,21 +1572,21 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
if (!Emitted) if (!Emitted)
Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(), Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
Arg, ArgVT, Arg); Arg, ArgVT, Arg);
assert(Emitted && "Failed to emit a aext!"); Emitted=Emitted; assert(Emitted && "Failed to emit a aext!"); Emitted=Emitted;
ArgVT = VA.getLocVT(); ArgVT = VA.getLocVT();
break; break;
} }
case CCValAssign::BCvt: { case CCValAssign::BCvt: {
unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(), unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(),
ISD::BIT_CONVERT, Arg, /*TODO: Kill=*/false); ISD::BITCAST, Arg, /*TODO: Kill=*/false);
assert(BC != 0 && "Failed to emit a bitcast!"); assert(BC != 0 && "Failed to emit a bitcast!");
Arg = BC; Arg = BC;
ArgVT = VA.getLocVT(); ArgVT = VA.getLocVT();
break; break;
} }
} }
if (VA.isRegLoc()) { if (VA.isRegLoc()) {
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
VA.getLocReg()).addReg(Arg); VA.getLocReg()).addReg(Arg);
@ -1597,7 +1597,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
AM.Base.Reg = StackPtr; AM.Base.Reg = StackPtr;
AM.Disp = LocMemOffset; AM.Disp = LocMemOffset;
const Value *ArgVal = ArgVals[VA.getValNo()]; const Value *ArgVal = ArgVals[VA.getValNo()];
// If this is a really simple value, emit this with the Value* version of // If this is a really simple value, emit this with the Value* version of
// X86FastEmitStore. If it isn't simple, we don't want to do this, as it // X86FastEmitStore. If it isn't simple, we don't want to do this, as it
// can cause us to reevaluate the argument. // can cause us to reevaluate the argument.
@ -1609,13 +1609,13 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
} }
// ELF / PIC requires GOT in the EBX register before function calls via PLT // ELF / PIC requires GOT in the EBX register before function calls via PLT
// GOT pointer. // GOT pointer.
if (Subtarget->isPICStyleGOT()) { if (Subtarget->isPICStyleGOT()) {
unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF); unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY), BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
X86::EBX).addReg(Base); X86::EBX).addReg(Base);
} }
// Issue the call. // Issue the call.
MachineInstrBuilder MIB; MachineInstrBuilder MIB;
if (CalleeOp) { if (CalleeOp) {
@ -1629,7 +1629,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
CallOpc = X86::CALL32r; CallOpc = X86::CALL32r;
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc)) MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
.addReg(CalleeOp); .addReg(CalleeOp);
} else { } else {
// Direct call. // Direct call.
assert(GV && "Not a direct call"); assert(GV && "Not a direct call");
@ -1640,10 +1640,10 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
CallOpc = X86::CALL64pcrel32; CallOpc = X86::CALL64pcrel32;
else else
CallOpc = X86::CALLpcrel32; CallOpc = X86::CALLpcrel32;
// See if we need any target-specific flags on the GV operand. // See if we need any target-specific flags on the GV operand.
unsigned char OpFlags = 0; unsigned char OpFlags = 0;
// On ELF targets, in both X86-64 and X86-32 mode, direct calls to // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
// external symbols most go through the PLT in PIC mode. If the symbol // external symbols most go through the PLT in PIC mode. If the symbol
// has hidden or protected visibility, or if it is static or local, then // has hidden or protected visibility, or if it is static or local, then
@ -1660,8 +1660,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
// automatically synthesizes these stubs. // automatically synthesizes these stubs.
OpFlags = X86II::MO_DARWIN_STUB; OpFlags = X86II::MO_DARWIN_STUB;
} }
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc)) MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
.addGlobalAddress(GV, 0, OpFlags); .addGlobalAddress(GV, 0, OpFlags);
} }
@ -1690,7 +1690,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
assert(RVLocs.size() == 1 && "Can't handle multi-value calls!"); assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
EVT CopyVT = RVLocs[0].getValVT(); EVT CopyVT = RVLocs[0].getValVT();
TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT); TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
// If this is a call to a function that returns an fp value on the x87 fp // If this is a call to a function that returns an fp value on the x87 fp
// stack, but where we prefer to use the value in xmm registers, copy it // stack, but where we prefer to use the value in xmm registers, copy it
// out as F80 and use a truncate to move it from fp stack reg to xmm reg. // out as F80 and use a truncate to move it from fp stack reg to xmm reg.
@ -1728,7 +1728,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
if (AndToI1) { if (AndToI1) {
// Mask out all but lowest bit for some call which produces an i1. // Mask out all but lowest bit for some call which produces an i1.
unsigned AndResult = createResultReg(X86::GR8RegisterClass); unsigned AndResult = createResultReg(X86::GR8RegisterClass);
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1); TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
ResultReg = AndResult; ResultReg = AndResult;
} }
@ -1798,7 +1798,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
MVT VT; MVT VT;
if (!isTypeLegal(C->getType(), VT)) if (!isTypeLegal(C->getType(), VT))
return false; return false;
// Get opcode and regclass of the output for the given load instruction. // Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0; unsigned Opc = 0;
const TargetRegisterClass *RC = NULL; const TargetRegisterClass *RC = NULL;
@ -1843,7 +1843,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
// No f80 support yet. // No f80 support yet.
return false; return false;
} }
// Materialize addresses with LEA instructions. // Materialize addresses with LEA instructions.
if (isa<GlobalValue>(C)) { if (isa<GlobalValue>(C)) {
X86AddressMode AM; X86AddressMode AM;
@ -1859,14 +1859,14 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
} }
return 0; return 0;
} }
// MachineConstantPool wants an explicit alignment. // MachineConstantPool wants an explicit alignment.
unsigned Align = TD.getPrefTypeAlignment(C->getType()); unsigned Align = TD.getPrefTypeAlignment(C->getType());
if (Align == 0) { if (Align == 0) {
// Alignment of vector types. FIXME! // Alignment of vector types. FIXME!
Align = TD.getTypeAllocSize(C->getType()); Align = TD.getTypeAllocSize(C->getType());
} }
// x86-32 PIC requires a PIC base register for constant pools. // x86-32 PIC requires a PIC base register for constant pools.
unsigned PICBase = 0; unsigned PICBase = 0;
unsigned char OpFlag = 0; unsigned char OpFlag = 0;
@ -1922,19 +1922,19 @@ bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
X86AddressMode AM; X86AddressMode AM;
if (!X86SelectAddress(LI->getOperand(0), AM)) if (!X86SelectAddress(LI->getOperand(0), AM))
return false; return false;
X86InstrInfo &XII = (X86InstrInfo&)TII; X86InstrInfo &XII = (X86InstrInfo&)TII;
unsigned Size = TD.getTypeAllocSize(LI->getType()); unsigned Size = TD.getTypeAllocSize(LI->getType());
unsigned Alignment = LI->getAlignment(); unsigned Alignment = LI->getAlignment();
SmallVector<MachineOperand, 8> AddrOps; SmallVector<MachineOperand, 8> AddrOps;
AM.getFullAddress(AddrOps); AM.getFullAddress(AddrOps);
MachineInstr *Result = MachineInstr *Result =
XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment); XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment);
if (Result == 0) return false; if (Result == 0) return false;
MI->getParent()->insert(MI, Result); MI->getParent()->insert(MI, Result);
MI->eraseFromParent(); MI->eraseFromParent();
return true; return true;

View File

@ -226,12 +226,12 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// TODO: when we have SSE, these could be more efficient, by using movd/movq. // TODO: when we have SSE, these could be more efficient, by using movd/movq.
if (!X86ScalarSSEf64) { if (!X86ScalarSSEf64) {
setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand); setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand); setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
if (Subtarget->is64Bit()) { if (Subtarget->is64Bit()) {
setOperationAction(ISD::BIT_CONVERT , MVT::f64 , Expand); setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
// Without SSE, i64->f64 goes through memory. // Without SSE, i64->f64 goes through memory.
setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Expand); setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
} }
} }
@ -654,10 +654,10 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
setOperationAction(ISD::SELECT, MVT::v4i16, Expand); setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
setOperationAction(ISD::SELECT, MVT::v2i32, Expand); setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
setOperationAction(ISD::SELECT, MVT::v1i64, Expand); setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Expand); setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Expand); setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Expand); setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Expand); setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
if (!UseSoftFloat && Subtarget->hasSSE1()) { if (!UseSoftFloat && Subtarget->hasSSE1()) {
addRegisterClass(MVT::v4f32, X86::VR128RegisterClass); addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
@ -1293,13 +1293,13 @@ X86TargetLowering::LowerReturn(SDValue Chain,
if (Subtarget->is64Bit()) { if (Subtarget->is64Bit()) {
if (ValVT == MVT::x86mmx) { if (ValVT == MVT::x86mmx) {
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy); ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
ValToCopy); ValToCopy);
// If we don't have SSE2 available, convert to v4f32 so the generated // If we don't have SSE2 available, convert to v4f32 so the generated
// register is legal. // register is legal.
if (!Subtarget->hasSSE2()) if (!Subtarget->hasSSE2())
ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,ValToCopy); ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
} }
} }
} }
@ -1406,7 +1406,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
MVT::i64, InFlag).getValue(1); MVT::i64, InFlag).getValue(1);
Val = Chain.getValue(0); Val = Chain.getValue(0);
} }
Val = DAG.getNode(ISD::BIT_CONVERT, dl, CopyVT, Val); Val = DAG.getNode(ISD::BITCAST, dl, CopyVT, Val);
} else { } else {
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
CopyVT, InFlag).getValue(1); CopyVT, InFlag).getValue(1);
@ -1589,7 +1589,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue, ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT())); DAG.getValueType(VA.getValVT()));
else if (VA.getLocInfo() == CCValAssign::BCvt) else if (VA.getLocInfo() == CCValAssign::BCvt)
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue); ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
if (VA.isExtInLoc()) { if (VA.isExtInLoc()) {
// Handle MMX values passed in XMM regs. // Handle MMX values passed in XMM regs.
@ -1922,14 +1922,14 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
case CCValAssign::AExt: case CCValAssign::AExt:
if (RegVT.isVector() && RegVT.getSizeInBits() == 128) { if (RegVT.isVector() && RegVT.getSizeInBits() == 128) {
// Special case: passing MMX values in XMM registers. // Special case: passing MMX values in XMM registers.
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg); Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg); Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg); Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
} else } else
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg); Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
break; break;
case CCValAssign::BCvt: case CCValAssign::BCvt:
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, RegVT, Arg); Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
break; break;
case CCValAssign::Indirect: { case CCValAssign::Indirect: {
// Store the argument. // Store the argument.
@ -3501,7 +3501,7 @@ static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
} }
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
} }
/// getOnesVector - Returns a vector of specified type with all bits set. /// getOnesVector - Returns a vector of specified type with all bits set.
@ -3514,7 +3514,7 @@ static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
SDValue Vec; SDValue Vec;
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
} }
@ -3599,9 +3599,9 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
// Perform the splat. // Perform the splat.
int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo }; int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1); V1 = DAG.getNode(ISD::BITCAST, dl, PVT, V1);
V1 = DAG.getVectorShuffle(PVT, dl, V1, DAG.getUNDEF(PVT), &SplatMask[0]); V1 = DAG.getVectorShuffle(PVT, dl, V1, DAG.getUNDEF(PVT), &SplatMask[0]);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, V1); return DAG.getNode(ISD::BITCAST, dl, VT, V1);
} }
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
@ -3725,7 +3725,7 @@ SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
} }
// Actual nodes that may contain scalar elements // Actual nodes that may contain scalar elements
if (Opcode == ISD::BIT_CONVERT) { if (Opcode == ISD::BITCAST) {
V = V.getOperand(0); V = V.getOperand(0);
EVT SrcVT = V.getValueType(); EVT SrcVT = V.getValueType();
unsigned NumElems = VT.getVectorNumElements(); unsigned NumElems = VT.getVectorNumElements();
@ -3914,7 +3914,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
} }
} }
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V); return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
} }
/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16. /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
@ -3955,8 +3955,8 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
const TargetLowering &TLI, DebugLoc dl) { const TargetLowering &TLI, DebugLoc dl) {
EVT ShVT = MVT::v2i64; EVT ShVT = MVT::v2i64;
unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL; unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp); SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(Opc, dl, ShVT, SrcOp, DAG.getNode(Opc, dl, ShVT, SrcOp,
DAG.getConstant(NumBits, TLI.getShiftAmountTy()))); DAG.getConstant(NumBits, TLI.getShiftAmountTy())));
} }
@ -4023,8 +4023,8 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
LD->getPointerInfo().getWithOffset(StartOffset), LD->getPointerInfo().getWithOffset(StartOffset),
false, false, 0); false, false, 0);
// Canonicalize it to a v4i32 shuffle. // Canonicalize it to a v4i32 shuffle.
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, V1); V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getVectorShuffle(MVT::v4i32, dl, V1, DAG.getVectorShuffle(MVT::v4i32, dl, V1,
DAG.getUNDEF(MVT::v4i32),&Mask[0])); DAG.getUNDEF(MVT::v4i32),&Mask[0]));
} }
@ -4092,7 +4092,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
SDValue ResNode = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, SDValue ResNode = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys,
Ops, 2, MVT::i32, Ops, 2, MVT::i32,
LDBase->getMemOperand()); LDBase->getMemOperand());
return DAG.getNode(ISD::BIT_CONVERT, DL, VT, ResNode); return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
} }
return SDValue(); return SDValue();
} }
@ -4184,7 +4184,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DAG.getUNDEF(Item.getValueType()), DAG.getUNDEF(Item.getValueType()),
&Mask[0]); &Mask[0]);
} }
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Item); return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Item);
} }
} }
@ -4208,7 +4208,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item); Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true, Item = getShuffleVectorZeroOrUndef(Item, 0, true,
Subtarget->hasSSE2(), DAG); Subtarget->hasSSE2(), DAG);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Item); return DAG.getNode(ISD::BITCAST, dl, VT, Item);
} }
} }
@ -4401,21 +4401,21 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 || assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 ||
ResVT == MVT::v8i16 || ResVT == MVT::v16i8); ResVT == MVT::v8i16 || ResVT == MVT::v16i8);
int Mask[2]; int Mask[2];
SDValue InVec = DAG.getNode(ISD::BIT_CONVERT,dl, MVT::v1i64, Op.getOperand(0)); SDValue InVec = DAG.getNode(ISD::BITCAST,dl, MVT::v1i64, Op.getOperand(0));
SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
InVec = Op.getOperand(1); InVec = Op.getOperand(1);
if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) { if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
unsigned NumElts = ResVT.getVectorNumElements(); unsigned NumElts = ResVT.getVectorNumElements();
VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp); VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp, VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp,
InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1)); InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1));
} else { } else {
InVec = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v1i64, InVec); InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec);
SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec); SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
Mask[0] = 0; Mask[1] = 2; Mask[0] = 0; Mask[1] = 2;
VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask); VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask);
} }
return DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp); return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
} }
// v8i16 shuffles - Prefer shuffles in the following order: // v8i16 shuffles - Prefer shuffles in the following order:
@ -4497,9 +4497,9 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad); MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad);
MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad); MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad);
NewV = DAG.getVectorShuffle(MVT::v2i64, dl, NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V1), DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V2), &MaskV[0]); DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
NewV = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, NewV); NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
// Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
// source words for the shuffle, to aid later transformations. // source words for the shuffle, to aid later transformations.
@ -4568,12 +4568,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8)); pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8)); pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8));
} }
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V1); V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1);
V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1, V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
DAG.getNode(ISD::BUILD_VECTOR, dl, DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::v16i8, &pshufbMask[0], 16)); MVT::v16i8, &pshufbMask[0], 16));
if (!TwoInputs) if (!TwoInputs)
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1); return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
// Calculate the shuffle mask for the second input, shuffle it, and // Calculate the shuffle mask for the second input, shuffle it, and
// OR it with the first shuffled input. // OR it with the first shuffled input.
@ -4588,12 +4588,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8)); pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8)); pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8));
} }
V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V2); V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2);
V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2, V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl, DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::v16i8, &pshufbMask[0], 16)); MVT::v16i8, &pshufbMask[0], 16));
V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2); V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1); return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
} }
// If BestLoQuad >= 0, generate a pshuflw to put the low elements in order, // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
@ -4760,8 +4760,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
// No SSSE3 - Calculate in place words and then fix all out of place words // No SSSE3 - Calculate in place words and then fix all out of place words
// With 0-16 extracts & inserts. Worst case is 16 bytes out of order from // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
// the 16 different words that comprise the two doublequadword input vectors. // the 16 different words that comprise the two doublequadword input vectors.
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1); V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V2); V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
SDValue NewV = V2Only ? V2 : V1; SDValue NewV = V2Only ? V2 : V1;
for (int i = 0; i != 8; ++i) { for (int i = 0; i != 8; ++i) {
int Elt0 = MaskVals[i*2]; int Elt0 = MaskVals[i*2];
@ -4823,7 +4823,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt, NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
DAG.getIntPtrConstant(i)); DAG.getIntPtrConstant(i));
} }
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, NewV); return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
} }
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
@ -4867,8 +4867,8 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
MaskVec.push_back(StartIdx / Scale); MaskVec.push_back(StartIdx / Scale);
} }
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V1); V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
V2 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V2); V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]); return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
} }
@ -4887,11 +4887,11 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT,
MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32; MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) && if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR && SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT && SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) { SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
// PR2108 // PR2108
OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32; OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
OpVT, OpVT,
@ -4901,9 +4901,9 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT,
} }
} }
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT, DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
DAG.getNode(ISD::BIT_CONVERT, dl, DAG.getNode(ISD::BITCAST, dl,
OpVT, SrcOp))); OpVT, SrcOp)));
} }
@ -5057,7 +5057,7 @@ LowerVECTOR_SHUFFLE_4wide(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
} }
static bool MayFoldVectorLoad(SDValue V) { static bool MayFoldVectorLoad(SDValue V) {
if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT) if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
V = V.getOperand(0); V = V.getOperand(0);
if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
V = V.getOperand(0); V = V.getOperand(0);
@ -5074,7 +5074,7 @@ static bool MayFoldVectorLoad(SDValue V) {
// one use. Remove this version after this bug get fixed. // one use. Remove this version after this bug get fixed.
// rdar://8434668, PR8156 // rdar://8434668, PR8156
static bool RelaxedMayFoldVectorLoad(SDValue V) { static bool RelaxedMayFoldVectorLoad(SDValue V) {
if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT) if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
V = V.getOperand(0); V = V.getOperand(0);
if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
V = V.getOperand(0); V = V.getOperand(0);
@ -5112,7 +5112,7 @@ bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
// If the bit convert changed the number of elements, it is unsafe // If the bit convert changed the number of elements, it is unsafe
// to examine the mask. // to examine the mask.
bool HasShuffleIntoBitcast = false; bool HasShuffleIntoBitcast = false;
if (V.getOpcode() == ISD::BIT_CONVERT) { if (V.getOpcode() == ISD::BITCAST) {
EVT SrcVT = V.getOperand(0).getValueType(); EVT SrcVT = V.getOperand(0).getValueType();
if (SrcVT.getVectorNumElements() != VT.getVectorNumElements()) if (SrcVT.getVectorNumElements() != VT.getVectorNumElements())
return false; return false;
@ -5127,7 +5127,7 @@ bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1); V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1);
// Skip one more bit_convert if necessary // Skip one more bit_convert if necessary
if (V.getOpcode() == ISD::BIT_CONVERT) if (V.getOpcode() == ISD::BITCAST)
V = V.getOperand(0); V = V.getOperand(0);
if (ISD::isNormalLoad(V.getNode())) { if (ISD::isNormalLoad(V.getNode())) {
@ -5164,8 +5164,8 @@ SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
EVT VT = Op.getValueType(); EVT VT = Op.getValueType();
// Canonizalize to v2f64. // Canonizalize to v2f64.
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, V1); V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, return DAG.getNode(ISD::BITCAST, dl, VT,
getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64, getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
V1, DAG)); V1, DAG));
} }
@ -5319,7 +5319,7 @@ SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG,
if (VT == MVT::v8i16 || VT == MVT::v16i8) { if (VT == MVT::v8i16 || VT == MVT::v16i8) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl); SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
if (NewOp.getNode()) if (NewOp.getNode())
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, NewOp); return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
} else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) { } else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
// FIXME: Figure out a cleaner way to do this. // FIXME: Figure out a cleaner way to do this.
// Try to make use of movq to zero out the top part. // Try to make use of movq to zero out the top part.
@ -5629,7 +5629,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
if (Idx == 0) if (Idx == 0)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getNode(ISD::BIT_CONVERT, dl, DAG.getNode(ISD::BITCAST, dl,
MVT::v4i32, MVT::v4i32,
Op.getOperand(0)), Op.getOperand(0)),
Op.getOperand(1))); Op.getOperand(1)));
@ -5650,14 +5650,14 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
if ((User->getOpcode() != ISD::STORE || if ((User->getOpcode() != ISD::STORE ||
(isa<ConstantSDNode>(Op.getOperand(1)) && (isa<ConstantSDNode>(Op.getOperand(1)) &&
cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) && cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
(User->getOpcode() != ISD::BIT_CONVERT || (User->getOpcode() != ISD::BITCAST ||
User->getValueType(0) != MVT::i32)) User->getValueType(0) != MVT::i32))
return SDValue(); return SDValue();
SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
Op.getOperand(0)), Op.getOperand(0)),
Op.getOperand(1)); Op.getOperand(1));
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Extract); return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
} else if (VT == MVT::i32) { } else if (VT == MVT::i32) {
// ExtractPS works with constant index. // ExtractPS works with constant index.
if (isa<ConstantSDNode>(Op.getOperand(1))) if (isa<ConstantSDNode>(Op.getOperand(1)))
@ -5688,7 +5688,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
if (Idx == 0) if (Idx == 0)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
DAG.getNode(ISD::BIT_CONVERT, dl, DAG.getNode(ISD::BITCAST, dl,
MVT::v4i32, Vec), MVT::v4i32, Vec),
Op.getOperand(1))); Op.getOperand(1)));
// Transform it so it match pextrw which produces a 32-bit result. // Transform it so it match pextrw which produces a 32-bit result.
@ -5819,7 +5819,7 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0)); SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 && assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 &&
"Expected an SSE type!"); "Expected an SSE type!");
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(),
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt)); DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
} }
@ -6390,7 +6390,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
MachinePointerInfo::getConstantPool(), MachinePointerInfo::getConstantPool(),
false, false, 16); false, false, 16);
SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0); SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0);
SDValue XR2F = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Unpck2); SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck2);
SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1, SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
MachinePointerInfo::getConstantPool(), MachinePointerInfo::getConstantPool(),
false, false, 16); false, false, 16);
@ -6420,19 +6420,19 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
DAG.getIntPtrConstant(0))); DAG.getIntPtrConstant(0)));
Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Load), DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
DAG.getIntPtrConstant(0)); DAG.getIntPtrConstant(0));
// Or the load with the bias. // Or the load with the bias.
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
MVT::v2f64, Load)), MVT::v2f64, Load)),
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
MVT::v2f64, Bias))); MVT::v2f64, Bias)));
Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Or), DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
DAG.getIntPtrConstant(0)); DAG.getIntPtrConstant(0));
// Subtract the bias. // Subtract the bias.
@ -6690,11 +6690,11 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo::getConstantPool(), MachinePointerInfo::getConstantPool(),
false, false, 16); false, false, 16);
if (VT.isVector()) { if (VT.isVector()) {
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(ISD::XOR, dl, MVT::v2i64, DAG.getNode(ISD::XOR, dl, MVT::v2i64,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
Op.getOperand(0)), Op.getOperand(0)),
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, Mask))); DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Mask)));
} else { } else {
return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask); return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
} }
@ -6746,7 +6746,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit); SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit, SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
DAG.getConstant(32, MVT::i32)); DAG.getConstant(32, MVT::i32));
SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, SignBit); SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit);
SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit, SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
DAG.getIntPtrConstant(0)); DAG.getIntPtrConstant(0));
} }
@ -7895,7 +7895,7 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
} }
EVT VT = Op.getValueType(); EVT VT = Op.getValueType();
ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, VT, ShAmt); ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt);
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
DAG.getConstant(NewIntNo, MVT::i32), DAG.getConstant(NewIntNo, MVT::i32),
Op.getOperand(1), ShAmt); Op.getOperand(1), ShAmt);
@ -8329,7 +8329,7 @@ SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
false, false, 16); false, false, 16);
Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend);
Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, Op); Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
return DAG.getNode(ISD::MUL, dl, VT, Op, R); return DAG.getNode(ISD::MUL, dl, VT, Op, R);
} }
@ -8550,16 +8550,16 @@ SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
return DAG.getMergeValues(Ops, 2, dl); return DAG.getMergeValues(Ops, 2, dl);
} }
SDValue X86TargetLowering::LowerBIT_CONVERT(SDValue Op, SDValue X86TargetLowering::LowerBITCAST(SDValue Op,
SelectionDAG &DAG) const { SelectionDAG &DAG) const {
EVT SrcVT = Op.getOperand(0).getValueType(); EVT SrcVT = Op.getOperand(0).getValueType();
EVT DstVT = Op.getValueType(); EVT DstVT = Op.getValueType();
assert((Subtarget->is64Bit() && !Subtarget->hasSSE2() && assert((Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
Subtarget->hasMMX() && !DisableMMX) && Subtarget->hasMMX() && !DisableMMX) &&
"Unexpected custom BIT_CONVERT"); "Unexpected custom BITCAST");
assert((DstVT == MVT::i64 || assert((DstVT == MVT::i64 ||
(DstVT.isVector() && DstVT.getSizeInBits()==64)) && (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
"Unexpected custom BIT_CONVERT"); "Unexpected custom BITCAST");
// i64 <=> MMX conversions are Legal. // i64 <=> MMX conversions are Legal.
if (SrcVT==MVT::i64 && DstVT.isVector()) if (SrcVT==MVT::i64 && DstVT.isVector())
return Op; return Op;
@ -8642,7 +8642,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SMULO: case ISD::SMULO:
case ISD::UMULO: return LowerXALUO(Op, DAG); case ISD::UMULO: return LowerXALUO(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG); case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
case ISD::BIT_CONVERT: return LowerBIT_CONVERT(Op, DAG); case ISD::BITCAST: return LowerBITCAST(Op, DAG);
} }
} }
@ -11177,13 +11177,13 @@ static SDValue PerformBTCombine(SDNode *N,
static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) { static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
SDValue Op = N->getOperand(0); SDValue Op = N->getOperand(0);
if (Op.getOpcode() == ISD::BIT_CONVERT) if (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0); Op = Op.getOperand(0);
EVT VT = N->getValueType(0), OpVT = Op.getValueType(); EVT VT = N->getValueType(0), OpVT = Op.getValueType();
if (Op.getOpcode() == X86ISD::VZEXT_LOAD && if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
VT.getVectorElementType().getSizeInBits() == VT.getVectorElementType().getSizeInBits() ==
OpVT.getVectorElementType().getSizeInBits()) { OpVT.getVectorElementType().getSizeInBits()) {
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op); return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
} }
return SDValue(); return SDValue();
} }

View File

@ -740,7 +740,7 @@ namespace llvm {
SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const; SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot, SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
SelectionDAG &DAG) const; SelectionDAG &DAG) const;
SDValue LowerBIT_CONVERT(SDValue op, SelectionDAG &DAG) const; SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const; SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;