forked from OSchip/llvm-project

Renaming ISD::BIT_CONVERT to ISD::BITCAST to better reflect the LLVM IR concept.

llvm-svn: 119990
commit 527da1b6e2 (parent 4329e078ac)
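For context (not part of the patch): the rename is purely mechanical for callers — the opcode enumerator and the related helper names change, the node's semantics do not. A minimal before/after sketch of a caller, mirroring the DebugLoc-less getNode overload used in the documentation hunk below; Val stands in for any f32-typed SDValue inside a lowering routine:

    // Previously: DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Val);
    // With this commit, the same node is built under the new opcode name:
    SDValue AsInt = DAG.getNode(ISD::BITCAST, MVT::i32, Val);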
@@ -1825,7 +1825,7 @@ register to convert the floating-point value to an integer.
 static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
   assert(Op.getValueType() == MVT::i32);
   Op = DAG.getNode(SPISD::FTOI, MVT::f32, Op.getOperand(0));
-  return DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Op);
+  return DAG.getNode(ISD::BITCAST, MVT::i32, Op);
 }
 </pre>
 </div>

@@ -274,11 +274,11 @@ namespace ISD {
     /// IDX, which must be a multiple of the result vector length.
     EXTRACT_SUBVECTOR,

     /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
     /// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
     /// values that indicate which value (or undef) each result element will
     /// get. These constant ints are accessible through the
     /// ShuffleVectorSDNode class. This is quite similar to the Altivec
     /// 'vperm' instruction, except that the indices must be constants and are
     /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
     VECTOR_SHUFFLE,
@@ -399,14 +399,14 @@ namespace ISD {
     /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
     FP_EXTEND,

-    // BIT_CONVERT - This operator converts between integer, vector and FP
+    // BITCAST - This operator converts between integer, vector and FP
     // values, as if the value was stored to memory with one type and loaded
     // from the same address with the other type (or equivalently for vector
     // format conversions, etc). The source and result are required to have
     // the same bit size (e.g. f32 <-> i32). This can also be used for
     // int-to-int or fp-to-fp conversions, but that is a noop, deleted by
     // getNode().
-    BIT_CONVERT,
+    BITCAST,

     // CONVERT_RNDSAT - This operator is used to support various conversions
     // between various types (float, signed, unsigned and vectors of those
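The comment above defines BITCAST as "store with one type, load the same bits back with the other type." A standalone host-side illustration of that semantics (plain C++, not DAG code; same-width reinterpretation, f32 <-> i32 as in the comment):

    #include <cstdint>
    #include <cstring>

    // Reinterpret the bits of a 32-bit float as a 32-bit integer: no value
    // conversion takes place, only a bit-for-bit copy, which is exactly the
    // store/reload model the BITCAST comment describes.
    uint32_t bitcastF32ToI32(float F) {
      static_assert(sizeof(float) == sizeof(uint32_t), "widths must match");
      uint32_t I;
      std::memcpy(&I, &F, sizeof(I));
      return I;
    }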
@@ -532,7 +532,7 @@ namespace ISD {
     // SRCVALUE - This is a node type that holds a Value* that is used to
     // make reference to a value in the LLVM IR.
     SRCVALUE,

     // MDNODE_SDNODE - This is a node that holdes an MDNode*, which is used to
     // reference metadata in the IR.
     MDNODE_SDNODE,

@@ -1,10 +1,10 @@
 //===- TargetSelectionDAG.td - Common code for DAG isels ---*- tablegen -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
 // This file is distributed under the University of Illinois Open Source
 // License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
 // This file defines the target-independent interfaces used by SelectionDAG
@@ -123,10 +123,10 @@ def SDTFPRoundOp : SDTypeProfile<1, 1, [ // fround
 def SDTFPExtendOp : SDTypeProfile<1, 1, [ // fextend
   SDTCisFP<0>, SDTCisFP<1>, SDTCisOpSmallerThanOp<1, 0>
 ]>;
 def SDTIntToFPOp : SDTypeProfile<1, 1, [ // [su]int_to_fp
   SDTCisFP<0>, SDTCisInt<1>
 ]>;
 def SDTFPToIntOp : SDTypeProfile<1, 1, [ // fp_to_[su]int
   SDTCisInt<0>, SDTCisFP<1>
 ]>;
 def SDTExtInreg : SDTypeProfile<1, 2, [ // sext_inreg
@@ -138,7 +138,7 @@ def SDTSetCC : SDTypeProfile<1, 3, [ // setcc
   SDTCisInt<0>, SDTCisSameAs<1, 2>, SDTCisVT<3, OtherVT>
 ]>;

 def SDTSelect : SDTypeProfile<1, 3, [ // select
   SDTCisInt<1>, SDTCisSameAs<0, 2>, SDTCisSameAs<2, 3>
 ]>;

@@ -162,11 +162,11 @@ def SDTBrind : SDTypeProfile<0, 1, [ // brind
 def SDTNone : SDTypeProfile<0, 0, []>; // ret, trap

 def SDTLoad : SDTypeProfile<1, 1, [ // load
   SDTCisPtrTy<1>
 ]>;

 def SDTStore : SDTypeProfile<0, 2, [ // store
   SDTCisPtrTy<1>
 ]>;

 def SDTIStore : SDTypeProfile<1, 3, [ // indexed store
@@ -235,7 +235,7 @@ class SDPatternOperator;
 // Selection DAG Node definitions.
 //
 class SDNode<string opcode, SDTypeProfile typeprof,
              list<SDNodeProperty> props = [], string sdclass = "SDNode">
   : SDPatternOperator {
   string Opcode = opcode;
   string SDClass = sdclass;
@@ -319,7 +319,7 @@ def subc : SDNode<"ISD::SUBC" , SDTIntBinOp,
                   [SDNPOutFlag]>;
 def sube : SDNode<"ISD::SUBE" , SDTIntBinOp,
                   [SDNPOutFlag, SDNPInFlag]>;

 def sext_inreg : SDNode<"ISD::SIGN_EXTEND_INREG", SDTExtInreg>;
 def bswap : SDNode<"ISD::BSWAP" , SDTIntUnaryOp>;
 def ctlz : SDNode<"ISD::CTLZ" , SDTIntUnaryOp>;
@@ -329,11 +329,11 @@ def sext : SDNode<"ISD::SIGN_EXTEND", SDTIntExtendOp>;
 def zext : SDNode<"ISD::ZERO_EXTEND", SDTIntExtendOp>;
 def anyext : SDNode<"ISD::ANY_EXTEND" , SDTIntExtendOp>;
 def trunc : SDNode<"ISD::TRUNCATE" , SDTIntTruncOp>;
-def bitconvert : SDNode<"ISD::BIT_CONVERT", SDTUnaryOp>;
+def bitconvert : SDNode<"ISD::BITCAST" , SDTUnaryOp>;
 def extractelt : SDNode<"ISD::EXTRACT_VECTOR_ELT", SDTVecExtract>;
 def insertelt : SDNode<"ISD::INSERT_VECTOR_ELT", SDTVecInsert>;

 def fadd : SDNode<"ISD::FADD" , SDTFPBinOp, [SDNPCommutative]>;
 def fsub : SDNode<"ISD::FSUB" , SDTFPBinOp>;
 def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>;
@@ -423,16 +423,16 @@ def vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
     SDTypeProfile<1, 2, [SDTCisPtrTy<2>]>, []>;
 def vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
     SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisPtrTy<3>]>, []>;

 // Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
 // these internally. Don't reference these directly.
 def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
     SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
     [SDNPHasChain]>;
 def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
     SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>,
     [SDNPHasChain]>;
 def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
     SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>;

 // Do not use cvt directly. Use cvt forms below

@@ -185,7 +185,7 @@ namespace {
     SDValue visitANY_EXTEND(SDNode *N);
     SDValue visitSIGN_EXTEND_INREG(SDNode *N);
     SDValue visitTRUNCATE(SDNode *N);
-    SDValue visitBIT_CONVERT(SDNode *N);
+    SDValue visitBITCAST(SDNode *N);
     SDValue visitBUILD_PAIR(SDNode *N);
     SDValue visitFADD(SDNode *N);
     SDValue visitFSUB(SDNode *N);
@@ -229,7 +229,7 @@ namespace {
     SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                        unsigned HiOp);
     SDValue CombineConsecutiveLoads(SDNode *N, EVT VT);
-    SDValue ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *, EVT);
+    SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT);
     SDValue BuildSDIV(SDNode *N);
     SDValue BuildUDIV(SDNode *N);
     SDNode *MatchRotate(SDValue LHS, SDValue RHS, DebugLoc DL);
@@ -273,15 +273,15 @@ namespace {

     /// Run - runs the dag combiner on all nodes in the work list
     void Run(CombineLevel AtLevel);

     SelectionDAG &getDAG() const { return DAG; }

     /// getShiftAmountTy - Returns a type large enough to hold any valid
     /// shift amount - before type legalization these can be huge.
     EVT getShiftAmountTy() {
       return LegalTypes ? TLI.getShiftAmountTy() : TLI.getPointerTy();
     }

     /// isTypeLegal - This method returns true if we are running before type
     /// legalization or if the specified VT is legal.
     bool isTypeLegal(const EVT &VT) {
@ -634,7 +634,7 @@ bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
|
|||
|
||||
// Replace the old value with the new one.
|
||||
++NodesCombined;
|
||||
DEBUG(dbgs() << "\nReplacing.2 ";
|
||||
DEBUG(dbgs() << "\nReplacing.2 ";
|
||||
TLO.Old.getNode()->dump(&DAG);
|
||||
dbgs() << "\nWith: ";
|
||||
TLO.New.getNode()->dump(&DAG);
|
||||
|
@ -694,7 +694,7 @@ SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) {
|
|||
unsigned ExtOpc =
|
||||
Op.getValueType().isByteSized() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
|
||||
return DAG.getNode(ExtOpc, dl, PVT, Op);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT))
|
||||
|
@ -978,7 +978,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
|
|||
RV.getNode()->getOpcode() != ISD::DELETED_NODE &&
|
||||
"Node was deleted but visit returned new node!");
|
||||
|
||||
DEBUG(dbgs() << "\nReplacing.3 ";
|
||||
DEBUG(dbgs() << "\nReplacing.3 ";
|
||||
N->dump(&DAG);
|
||||
dbgs() << "\nWith: ";
|
||||
RV.getNode()->dump(&DAG);
|
||||
|
@@ -1057,7 +1057,7 @@ SDValue DAGCombiner::visit(SDNode *N) {
   case ISD::ANY_EXTEND: return visitANY_EXTEND(N);
   case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N);
   case ISD::TRUNCATE: return visitTRUNCATE(N);
-  case ISD::BIT_CONVERT: return visitBIT_CONVERT(N);
+  case ISD::BITCAST: return visitBITCAST(N);
   case ISD::BUILD_PAIR: return visitBUILD_PAIR(N);
   case ISD::FADD: return visitFADD(N);
   case ISD::FSUB: return visitFSUB(N);
@ -1228,7 +1228,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
SDValue Result;
|
||||
|
||||
// If we've change things around then replace token factor.
|
||||
|
@ -1429,10 +1429,10 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
|
|||
|
||||
if (N1.getOpcode() == ISD::AND) {
|
||||
SDValue AndOp0 = N1.getOperand(0);
|
||||
ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1));
|
||||
ConstantSDNode *AndOp1 = dyn_cast<ConstantSDNode>(N1->getOperand(1));
|
||||
unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
|
||||
unsigned DestBits = VT.getScalarType().getSizeInBits();
|
||||
|
||||
|
||||
// (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
|
||||
// and similar xforms where the inner op is either ~0 or 0.
|
||||
if (NumSignBits == DestBits && AndOp1 && AndOp1->isOne()) {
|
||||
|
@ -2269,8 +2269,8 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
|
|||
if (ExtVT == LoadedVT &&
|
||||
(!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
|
||||
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
|
||||
|
||||
SDValue NewLoad =
|
||||
|
||||
SDValue NewLoad =
|
||||
DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
|
||||
LN0->getChain(), LN0->getBasePtr(),
|
||||
LN0->getPointerInfo(),
|
||||
|
@ -2280,7 +2280,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
|
|||
CombineTo(LN0, NewLoad, NewLoad.getValue(1));
|
||||
return SDValue(N, 0); // Return N so it doesn't get rechecked!
|
||||
}
|
||||
|
||||
|
||||
// Do not change the width of a volatile load.
|
||||
// Do not generate loads of non-round integer types since these can
|
||||
// be expensive (and would be wrong if the type is not byte sized).
|
||||
|
@ -2304,7 +2304,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
|
|||
}
|
||||
|
||||
AddToWorkList(NewPtr.getNode());
|
||||
|
||||
|
||||
EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT;
|
||||
SDValue Load =
|
||||
DAG.getExtLoad(ISD::ZEXTLOAD, LoadResultTy, LN0->getDebugLoc(),
|
||||
|
@ -3086,7 +3086,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
|
|||
return DAG.getNode(ISD::SRL, N->getDebugLoc(), VT, N0.getOperand(0),
|
||||
DAG.getConstant(c1 + c2, N1.getValueType()));
|
||||
}
|
||||
|
||||
|
||||
// fold (srl (shl x, c), c) -> (and x, cst2)
|
||||
if (N1C && N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 &&
|
||||
N0.getValueSizeInBits() <= 64) {
|
||||
|
@ -3094,7 +3094,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
|
|||
return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0.getOperand(0),
|
||||
DAG.getConstant(~0ULL >> ShAmt, VT));
|
||||
}
|
||||
|
||||
|
||||
|
||||
// fold (srl (anyextend x), c) -> (anyextend (srl x, c))
|
||||
if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) {
|
||||
|
@ -3198,7 +3198,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
|
|||
// brcond i32 %c ...
|
||||
//
|
||||
// into
|
||||
//
|
||||
//
|
||||
// %a = ...
|
||||
// %b = and %a, 2
|
||||
// %c = setcc eq %b, 0
|
||||
|
@ -3626,7 +3626,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
|
|||
N0.getOperand(0), N0.getOperand(1),
|
||||
cast<CondCodeSDNode>(N0.getOperand(2))->get()),
|
||||
NegOne, DAG.getConstant(0, VT));
|
||||
}
|
||||
}
|
||||
|
||||
// fold (sext x) -> (zext x) if the sign bit is known zero.
|
||||
if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) &&
|
||||
|
@ -4104,7 +4104,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
|
|||
if ((N0.getValueType().getSizeInBits() & (EVTBits-1)) != 0)
|
||||
return SDValue();
|
||||
}
|
||||
|
||||
|
||||
// If the shift amount is larger than the input type then we're not
|
||||
// accessing any of the loaded bytes. If the load was a zextload/extload
|
||||
// then the result of the shift+trunc is zero/undef (handled elsewhere).
|
||||
|
@ -4112,7 +4112,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
|
|||
// of the extended byte. This is not worth optimizing for.
|
||||
if (ShAmt >= VT.getSizeInBits())
|
||||
return SDValue();
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -4379,7 +4379,7 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
   return SDValue();
 }

-SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
+SDValue DAGCombiner::visitBITCAST(SDNode *N) {
   SDValue N0 = N->getOperand(0);
   EVT VT = N->getValueType(0);

@@ -4403,12 +4403,12 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
     assert(!DestEltVT.isVector() &&
            "Element type of vector ValueType must not be vector!");
     if (isSimple)
-      return ConstantFoldBIT_CONVERTofBUILD_VECTOR(N0.getNode(), DestEltVT);
+      return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
   }

   // If the input is a constant, let getNode fold it.
   if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
-    SDValue Res = DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, N0);
+    SDValue Res = DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, N0);
     if (Res.getNode() != N) {
       if (!LegalOperations ||
           TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
@@ -4424,8 +4424,8 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
   }

   // (conv (conv x, t1), t2) -> (conv x, t2)
-  if (N0.getOpcode() == ISD::BIT_CONVERT)
-    return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT,
+  if (N0.getOpcode() == ISD::BITCAST)
+    return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT,
                        N0.getOperand(0));

   // fold (conv (load x)) -> (load (conv*)x)
@ -4446,7 +4446,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
|
|||
OrigAlign);
|
||||
AddToWorkList(N);
|
||||
CombineTo(N0.getNode(),
|
||||
DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
|
||||
DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
|
||||
N0.getValueType(), Load),
|
||||
Load.getValue(1));
|
||||
return Load;
|
||||
|
@ -4458,7 +4458,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
|
|||
// This often reduces constant pool loads.
|
||||
if ((N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FABS) &&
|
||||
N0.getNode()->hasOneUse() && VT.isInteger() && !VT.isVector()) {
|
||||
SDValue NewConv = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(), VT,
|
||||
SDValue NewConv = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(), VT,
|
||||
N0.getOperand(0));
|
||||
AddToWorkList(NewConv.getNode());
|
||||
|
||||
|
@ -4481,7 +4481,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
|
|||
unsigned OrigXWidth = N0.getOperand(1).getValueType().getSizeInBits();
|
||||
EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth);
|
||||
if (isTypeLegal(IntXVT)) {
|
||||
SDValue X = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
|
||||
SDValue X = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
|
||||
IntXVT, N0.getOperand(1));
|
||||
AddToWorkList(X.getNode());
|
||||
|
||||
|
@ -4506,7 +4506,7 @@ SDValue DAGCombiner::visitBIT_CONVERT(SDNode *N) {
|
|||
X, DAG.getConstant(SignBit, VT));
|
||||
AddToWorkList(X.getNode());
|
||||
|
||||
SDValue Cst = DAG.getNode(ISD::BIT_CONVERT, N0.getDebugLoc(),
|
||||
SDValue Cst = DAG.getNode(ISD::BITCAST, N0.getDebugLoc(),
|
||||
VT, N0.getOperand(0));
|
||||
Cst = DAG.getNode(ISD::AND, Cst.getDebugLoc(), VT,
|
||||
Cst, DAG.getConstant(~SignBit, VT));
|
||||
|
@@ -4531,11 +4531,11 @@ SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) {
   return CombineConsecutiveLoads(N, VT);
 }

-/// ConstantFoldBIT_CONVERTofBUILD_VECTOR - We know that BV is a build_vector
+/// ConstantFoldBITCASTofBUILD_VECTOR - We know that BV is a build_vector
 /// node with Constant, ConstantFP or Undef operands. DstEltVT indicates the
 /// destination element value type.
 SDValue DAGCombiner::
-ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
+ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
   EVT SrcEltVT = BV->getValueType(0).getVectorElementType();

   // If this is already the right type, we're done.
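This routine folds a bitcast of a BUILD_VECTOR of constants by recombining the element bits directly instead of emitting a node. As a rough standalone illustration of the integer widening case only (plain C++, not the DAG code; little-endian element order is assumed purely for the example):

    #include <cstdint>

    // Conceptually what folding bitcast(<2 x i32>) to <1 x i64> does: the two
    // narrow constant elements are concatenated into one wide element.
    // Which element lands in the low half depends on endianness.
    uint64_t packTwoI32LittleEndian(uint32_t Lo, uint32_t Hi) {
      return (uint64_t(Hi) << 32) | Lo;
    }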
@ -4553,10 +4553,10 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
|
|||
// Due to the FP element handling below calling this routine recursively,
|
||||
// we can end up with a scalar-to-vector node here.
|
||||
if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR)
|
||||
return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
|
||||
DAG.getNode(ISD::BIT_CONVERT, BV->getDebugLoc(),
|
||||
return DAG.getNode(ISD::SCALAR_TO_VECTOR, BV->getDebugLoc(), VT,
|
||||
DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
|
||||
DstEltVT, BV->getOperand(0)));
|
||||
|
||||
|
||||
SmallVector<SDValue, 8> Ops;
|
||||
for (unsigned i = 0, e = BV->getNumOperands(); i != e; ++i) {
|
||||
SDValue Op = BV->getOperand(i);
|
||||
|
@ -4564,7 +4564,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
|
|||
// are promoted and implicitly truncated. Make that explicit here.
|
||||
if (Op.getValueType() != SrcEltVT)
|
||||
Op = DAG.getNode(ISD::TRUNCATE, BV->getDebugLoc(), SrcEltVT, Op);
|
||||
Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, BV->getDebugLoc(),
|
||||
Ops.push_back(DAG.getNode(ISD::BITCAST, BV->getDebugLoc(),
|
||||
DstEltVT, Op));
|
||||
AddToWorkList(Ops.back().getNode());
|
||||
}
|
||||
|
@ -4580,7 +4580,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
|
|||
// same sizes.
|
||||
assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
|
||||
EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits());
|
||||
BV = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, IntVT).getNode();
|
||||
BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode();
|
||||
SrcEltVT = IntVT;
|
||||
}
|
||||
|
||||
|
@ -4589,10 +4589,10 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
|
|||
if (DstEltVT.isFloatingPoint()) {
|
||||
assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
|
||||
EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits());
|
||||
SDNode *Tmp = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, TmpVT).getNode();
|
||||
SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode();
|
||||
|
||||
// Next, convert to FP elements of the same size.
|
||||
return ConstantFoldBIT_CONVERTofBUILD_VECTOR(Tmp, DstEltVT);
|
||||
return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT);
|
||||
}
|
||||
|
||||
// Okay, we know the src/dst types are both integers of differing types.
|
||||
|
@ -5068,7 +5068,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
|
|||
|
||||
// Transform fneg(bitconvert(x)) -> bitconvert(x^sign) to avoid loading
|
||||
// constant pool values.
|
||||
if (N0.getOpcode() == ISD::BIT_CONVERT &&
|
||||
if (N0.getOpcode() == ISD::BITCAST &&
|
||||
!VT.isVector() &&
|
||||
N0.getNode()->hasOneUse() &&
|
||||
N0.getOperand(0).getValueType().isInteger()) {
|
||||
|
@ -5078,7 +5078,7 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
|
|||
Int = DAG.getNode(ISD::XOR, N0.getDebugLoc(), IntVT, Int,
|
||||
DAG.getConstant(APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
|
||||
AddToWorkList(Int.getNode());
|
||||
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
|
||||
return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
|
||||
VT, Int);
|
||||
}
|
||||
}
|
||||
|
@ -5104,7 +5104,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
|
|||
|
||||
// Transform fabs(bitconvert(x)) -> bitconvert(x&~sign) to avoid loading
|
||||
// constant pool values.
|
||||
if (N0.getOpcode() == ISD::BIT_CONVERT && N0.getNode()->hasOneUse() &&
|
||||
if (N0.getOpcode() == ISD::BITCAST && N0.getNode()->hasOneUse() &&
|
||||
N0.getOperand(0).getValueType().isInteger() &&
|
||||
!N0.getOperand(0).getValueType().isVector()) {
|
||||
SDValue Int = N0.getOperand(0);
|
||||
|
@ -5113,7 +5113,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
|
|||
Int = DAG.getNode(ISD::AND, N0.getDebugLoc(), IntVT, Int,
|
||||
DAG.getConstant(~APInt::getSignBit(IntVT.getSizeInBits()), IntVT));
|
||||
AddToWorkList(Int.getNode());
|
||||
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
|
||||
return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
|
||||
N->getValueType(0), Int);
|
||||
}
|
||||
}
|
||||
|
@ -5160,7 +5160,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
|
|||
// brcond i32 %c ...
|
||||
//
|
||||
// into
|
||||
//
|
||||
//
|
||||
// %a = ...
|
||||
// %b = and i32 %a, 2
|
||||
// %c = setcc eq %b, 0
|
||||
|
@ -5211,7 +5211,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
|
|||
// Restore N1 if the above transformation doesn't match.
|
||||
N1 = N->getOperand(1);
|
||||
}
|
||||
|
||||
|
||||
// Transform br(xor(x, y)) -> br(x != y)
|
||||
// Transform br(xor(xor(x,y), 1)) -> br (x == y)
|
||||
if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) {
|
||||
|
@ -5665,10 +5665,10 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
|
|||
// Create token factor to keep old chain connected.
|
||||
SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
|
||||
MVT::Other, Chain, ReplLoad.getValue(1));
|
||||
|
||||
|
||||
// Make sure the new and old chains are cleaned up.
|
||||
AddToWorkList(Token.getNode());
|
||||
|
||||
|
||||
// Replace uses with load result and token factor. Don't add users
|
||||
// to work list.
|
||||
return CombineTo(N, ReplLoad.getValue(0), Token, false);
|
||||
|
@ -5688,17 +5688,17 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
|
|||
static std::pair<unsigned, unsigned>
|
||||
CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
|
||||
std::pair<unsigned, unsigned> Result(0, 0);
|
||||
|
||||
|
||||
// Check for the structure we're looking for.
|
||||
if (V->getOpcode() != ISD::AND ||
|
||||
!isa<ConstantSDNode>(V->getOperand(1)) ||
|
||||
!ISD::isNormalLoad(V->getOperand(0).getNode()))
|
||||
return Result;
|
||||
|
||||
|
||||
// Check the chain and pointer.
|
||||
LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0));
|
||||
if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer.
|
||||
|
||||
|
||||
// The store should be chained directly to the load or be an operand of a
|
||||
// tokenfactor.
|
||||
if (LD == Chain.getNode())
|
||||
|
@ -5714,7 +5714,7 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
|
|||
}
|
||||
if (!isOk) return Result;
|
||||
}
|
||||
|
||||
|
||||
// This only handles simple types.
|
||||
if (V.getValueType() != MVT::i16 &&
|
||||
V.getValueType() != MVT::i32 &&
|
||||
|
@ -5730,7 +5730,7 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
|
|||
unsigned NotMaskTZ = CountTrailingZeros_64(NotMask);
|
||||
if (NotMaskTZ & 7) return Result; // Must be multiple of a byte.
|
||||
if (NotMaskLZ == 64) return Result; // All zero mask.
|
||||
|
||||
|
||||
// See if we have a continuous run of bits. If so, we have 0*1+0*
|
||||
if (CountTrailingOnes_64(NotMask >> NotMaskTZ)+NotMaskTZ+NotMaskLZ != 64)
|
||||
return Result;
|
||||
|
@ -5738,19 +5738,19 @@ CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) {
|
|||
// Adjust NotMaskLZ down to be from the actual size of the int instead of i64.
|
||||
if (V.getValueType() != MVT::i64 && NotMaskLZ)
|
||||
NotMaskLZ -= 64-V.getValueSizeInBits();
|
||||
|
||||
|
||||
unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8;
|
||||
switch (MaskedBytes) {
|
||||
case 1:
|
||||
case 2:
|
||||
case 1:
|
||||
case 2:
|
||||
case 4: break;
|
||||
default: return Result; // All one mask, or 5-byte mask.
|
||||
}
|
||||
|
||||
|
||||
// Verify that the first bit starts at a multiple of mask so that the access
|
||||
// is aligned the same as the access width.
|
||||
if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result;
|
||||
|
||||
|
||||
Result.first = MaskedBytes;
|
||||
Result.second = NotMaskTZ/8;
|
||||
return Result;
|
||||
|
@ -5767,20 +5767,20 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
|
|||
unsigned NumBytes = MaskInfo.first;
|
||||
unsigned ByteShift = MaskInfo.second;
|
||||
SelectionDAG &DAG = DC->getDAG();
|
||||
|
||||
|
||||
// Check to see if IVal is all zeros in the part being masked in by the 'or'
|
||||
// that uses this. If not, this is not a replacement.
|
||||
APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(),
|
||||
ByteShift*8, (ByteShift+NumBytes)*8);
|
||||
if (!DAG.MaskedValueIsZero(IVal, Mask)) return 0;
|
||||
|
||||
|
||||
// Check that it is legal on the target to do this. It is legal if the new
|
||||
// VT we're shrinking to (i8/i16/i32) is legal or we're still before type
|
||||
// legalization.
|
||||
MVT VT = MVT::getIntegerVT(NumBytes*8);
|
||||
if (!DC->isTypeLegal(VT))
|
||||
return 0;
|
||||
|
||||
|
||||
// Okay, we can do this! Replace the 'St' store with a store of IVal that is
|
||||
// shifted by ByteShift and truncated down to NumBytes.
|
||||
if (ByteShift)
|
||||
|
@ -5795,19 +5795,19 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
|
|||
StOffset = ByteShift;
|
||||
else
|
||||
StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes;
|
||||
|
||||
|
||||
SDValue Ptr = St->getBasePtr();
|
||||
if (StOffset) {
|
||||
Ptr = DAG.getNode(ISD::ADD, IVal->getDebugLoc(), Ptr.getValueType(),
|
||||
Ptr, DAG.getConstant(StOffset, Ptr.getValueType()));
|
||||
NewAlign = MinAlign(NewAlign, StOffset);
|
||||
}
|
||||
|
||||
|
||||
// Truncate down to the new size.
|
||||
IVal = DAG.getNode(ISD::TRUNCATE, IVal->getDebugLoc(), VT, IVal);
|
||||
|
||||
|
||||
++OpsNarrowed;
|
||||
return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
|
||||
return DAG.getStore(St->getChain(), St->getDebugLoc(), IVal, Ptr,
|
||||
St->getPointerInfo().getWithOffset(StOffset),
|
||||
false, false, NewAlign).getNode();
|
||||
}
|
||||
|
@ -5831,7 +5831,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
|
|||
return SDValue();
|
||||
|
||||
unsigned Opc = Value.getOpcode();
|
||||
|
||||
|
||||
// If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst
|
||||
// is a byte mask indicating a consecutive number of bytes, check to see if
|
||||
// Y is known to provide just those bytes. If so, we try to replace the
|
||||
|
@ -5844,7 +5844,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
|
|||
if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad,
|
||||
Value.getOperand(1), ST,this))
|
||||
return SDValue(NewST, 0);
|
||||
|
||||
|
||||
// Or is commutative, so try swapping X and Y.
|
||||
MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain);
|
||||
if (MaskedLoad.first)
|
||||
|
@ -5852,7 +5852,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
|
|||
Value.getOperand(0), ST,this))
|
||||
return SDValue(NewST, 0);
|
||||
}
|
||||
|
||||
|
||||
if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) ||
|
||||
Value.getOperand(1).getOpcode() != ISD::Constant)
|
||||
return SDValue();
|
||||
|
@ -5944,7 +5944,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
|
|||
|
||||
// If this is a store of a bit convert, store the input value if the
|
||||
// resultant store does not need a higher alignment than the original.
|
||||
if (Value.getOpcode() == ISD::BIT_CONVERT && !ST->isTruncatingStore() &&
|
||||
if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() &&
|
||||
ST->isUnindexed()) {
|
||||
unsigned OrigAlign = ST->getAlignment();
|
||||
EVT SVT = Value.getOperand(0).getValueType();
|
||||
|
@ -6146,9 +6146,9 @@ SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) {
|
|||
return DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
|
||||
InVec.getValueType(), &Ops[0], Ops.size());
|
||||
}
|
||||
// If the invec is an UNDEF and if EltNo is a constant, create a new
|
||||
// If the invec is an UNDEF and if EltNo is a constant, create a new
|
||||
// BUILD_VECTOR with undef elements and the inserted element.
|
||||
if (!LegalOperations && InVec.getOpcode() == ISD::UNDEF &&
|
||||
if (!LegalOperations && InVec.getOpcode() == ISD::UNDEF &&
|
||||
isa<ConstantSDNode>(EltNo)) {
|
||||
EVT VT = InVec.getValueType();
|
||||
EVT EltVT = VT.getVectorElementType();
|
||||
|
@ -6198,7 +6198,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
|
|||
EVT ExtVT = VT.getVectorElementType();
|
||||
EVT LVT = ExtVT;
|
||||
|
||||
if (InVec.getOpcode() == ISD::BIT_CONVERT) {
|
||||
if (InVec.getOpcode() == ISD::BITCAST) {
|
||||
EVT BCVT = InVec.getOperand(0).getValueType();
|
||||
if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType()))
|
||||
return SDValue();
|
||||
|
@ -6232,7 +6232,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
|
|||
int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt);
|
||||
InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1);
|
||||
|
||||
if (InVec.getOpcode() == ISD::BIT_CONVERT)
|
||||
if (InVec.getOpcode() == ISD::BITCAST)
|
||||
InVec = InVec.getOperand(0);
|
||||
if (ISD::isNormalLoad(InVec.getNode())) {
|
||||
LN0 = cast<LoadSDNode>(InVec);
|
||||
|
@ -6262,7 +6262,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
|
|||
|
||||
SDValue NewPtr = LN0->getBasePtr();
|
||||
unsigned PtrOff = 0;
|
||||
|
||||
|
||||
if (Elt) {
|
||||
PtrOff = LVT.getSizeInBits() * Elt / 8;
|
||||
EVT PtrType = NewPtr.getValueType();
|
||||
|
@ -6339,7 +6339,7 @@ SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) {
|
|||
unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
|
||||
if (ExtIndex > VT.getVectorNumElements())
|
||||
return SDValue();
|
||||
|
||||
|
||||
Mask.push_back(ExtIndex);
|
||||
continue;
|
||||
}
|
||||
|
@ -6396,7 +6396,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
|
|||
// If this is a bit convert that changes the element type of the vector but
|
||||
// not the number of vector elements, look through it. Be careful not to
|
||||
// look though conversions that change things like v4f32 to v2f64.
|
||||
if (V->getOpcode() == ISD::BIT_CONVERT) {
|
||||
if (V->getOpcode() == ISD::BITCAST) {
|
||||
SDValue ConvInput = V->getOperand(0);
|
||||
if (ConvInput.getValueType().isVector() &&
|
||||
ConvInput.getValueType().getVectorNumElements() == NumElts)
|
||||
|
@ -6494,7 +6494,7 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
|
|||
SDValue LHS = N->getOperand(0);
|
||||
SDValue RHS = N->getOperand(1);
|
||||
if (N->getOpcode() == ISD::AND) {
|
||||
if (RHS.getOpcode() == ISD::BIT_CONVERT)
|
||||
if (RHS.getOpcode() == ISD::BITCAST)
|
||||
RHS = RHS.getOperand(0);
|
||||
if (RHS.getOpcode() == ISD::BUILD_VECTOR) {
|
||||
SmallVector<int, 8> Indices;
|
||||
|
@ -6522,9 +6522,9 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
|
|||
DAG.getConstant(0, EltVT));
|
||||
SDValue Zero = DAG.getNode(ISD::BUILD_VECTOR, N->getDebugLoc(),
|
||||
RVT, &ZeroOps[0], ZeroOps.size());
|
||||
LHS = DAG.getNode(ISD::BIT_CONVERT, dl, RVT, LHS);
|
||||
LHS = DAG.getNode(ISD::BITCAST, dl, RVT, LHS);
|
||||
SDValue Shuf = DAG.getVectorShuffle(RVT, dl, LHS, Zero, &Indices[0]);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Shuf);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Shuf);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -6643,7 +6643,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
|
|||
if (LHS.getOpcode() != RHS.getOpcode() ||
|
||||
!LHS.hasOneUse() || !RHS.hasOneUse())
|
||||
return false;
|
||||
|
||||
|
||||
// If this is a load and the token chain is identical, replace the select
|
||||
// of two loads with a load through a select of the address to load from.
|
||||
// This triggers in things like "select bool X, 10.0, 123.0" after the FP
|
||||
|
@ -6651,7 +6651,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
|
|||
if (LHS.getOpcode() == ISD::LOAD) {
|
||||
LoadSDNode *LLD = cast<LoadSDNode>(LHS);
|
||||
LoadSDNode *RLD = cast<LoadSDNode>(RHS);
|
||||
|
||||
|
||||
// Token chains must be identical.
|
||||
if (LHS.getOperand(0) != RHS.getOperand(0) ||
|
||||
// Do not let this transformation reduce the number of volatile loads.
|
||||
|
@ -6671,7 +6671,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
|
|||
LLD->getPointerInfo().getAddrSpace() != 0 ||
|
||||
RLD->getPointerInfo().getAddrSpace() != 0)
|
||||
return false;
|
||||
|
||||
|
||||
// Check that the select condition doesn't reach either load. If so,
|
||||
// folding this will induce a cycle into the DAG. If not, this is safe to
|
||||
// xform, so create a select of the addresses.
|
||||
|
@ -6694,7 +6694,7 @@ bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS,
|
|||
(LLD->hasAnyUseOfValue(1) &&
|
||||
(LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))))
|
||||
return false;
|
||||
|
||||
|
||||
Addr = DAG.getNode(ISD::SELECT_CC, TheSelect->getDebugLoc(),
|
||||
LLD->getBasePtr().getValueType(),
|
||||
TheSelect->getOperand(0),
|
||||
|
@ -6742,7 +6742,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
|
|||
ISD::CondCode CC, bool NotExtCompare) {
|
||||
// (x ? y : y) -> y.
|
||||
if (N2 == N3) return N2;
|
||||
|
||||
|
||||
EVT VT = N2.getValueType();
|
||||
ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
|
||||
ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
|
||||
|
@ -6778,7 +6778,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
|
|||
return DAG.getNode(ISD::FABS, DL, VT, N3);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)"
|
||||
// where "tmp" is a constant pool entry containing an array with 1.0 and 2.0
|
||||
// in it. This is a win when the constant is not otherwise available because
|
||||
|
@ -6801,7 +6801,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
|
|||
};
|
||||
const Type *FPTy = Elts[0]->getType();
|
||||
const TargetData &TD = *TLI.getTargetData();
|
||||
|
||||
|
||||
// Create a ConstantArray of the two constants.
|
||||
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts, 2);
|
||||
SDValue CPIdx = DAG.getConstantPool(CA, TLI.getPointerTy(),
|
||||
|
@ -6813,7 +6813,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
|
|||
SDValue Zero = DAG.getIntPtrConstant(0);
|
||||
unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType());
|
||||
SDValue One = DAG.getIntPtrConstant(EltSize);
|
||||
|
||||
|
||||
SDValue Cond = DAG.getSetCC(DL,
|
||||
TLI.getSetCCResultType(N0.getValueType()),
|
||||
N0, N1, CC);
|
||||
|
@ -6826,7 +6826,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
|
|||
false, Alignment);
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check to see if we can perform the "gzip trick", transforming
|
||||
// (select_cc setlt X, 0, A, 0) -> (and (sra X, (sub size(X), 1), A)
|
||||
|
@ -6879,7 +6879,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
|
|||
// shift-left and shift-right-arith.
|
||||
if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND &&
|
||||
N0->getValueType(0) == VT &&
|
||||
N1C && N1C->isNullValue() &&
|
||||
N1C && N1C->isNullValue() &&
|
||||
N2C && N2C->isNullValue()) {
|
||||
SDValue AndLHS = N0->getOperand(0);
|
||||
ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1));
|
||||
|
@ -6889,13 +6889,13 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
|
|||
SDValue ShlAmt =
|
||||
DAG.getConstant(AndMask.countLeadingZeros(), getShiftAmountTy());
|
||||
SDValue Shl = DAG.getNode(ISD::SHL, N0.getDebugLoc(), VT, AndLHS, ShlAmt);
|
||||
|
||||
|
||||
// Now arithmetic right shift it all the way over, so the result is either
|
||||
// all-ones, or zero.
|
||||
SDValue ShrAmt =
|
||||
DAG.getConstant(AndMask.getBitWidth()-1, getShiftAmountTy());
|
||||
SDValue Shr = DAG.getNode(ISD::SRA, N0.getDebugLoc(), VT, Shl, ShrAmt);
|
||||
|
||||
|
||||
return DAG.getNode(ISD::AND, DL, VT, Shr, N3);
|
||||
}
|
||||
}
|
||||
|
@ -7066,7 +7066,7 @@ static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset,
|
|||
Offset += C->getZExtValue();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Return the underlying GlobalValue, and update the Offset. Return false
|
||||
// for GlobalAddressSDNode since the same GlobalAddress may be represented
|
||||
// by multiple nodes with different offsets.
|
||||
|
@ -7125,7 +7125,7 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
|
|||
return !((Offset1 + Size1) <= Offset2 || (Offset2 + Size2) <= Offset1);
|
||||
}
|
||||
|
||||
// Otherwise, if we know what the bases are, and they aren't identical, then
|
||||
// Otherwise, if we know what the bases are, and they aren't identical, then
|
||||
// we know they cannot alias.
|
||||
if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2))
|
||||
return false;
|
||||
|
@ -7139,13 +7139,13 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
|
|||
(Size1 == Size2) && (SrcValueAlign1 > Size1)) {
|
||||
int64_t OffAlign1 = SrcValueOffset1 % SrcValueAlign1;
|
||||
int64_t OffAlign2 = SrcValueOffset2 % SrcValueAlign1;
|
||||
|
||||
|
||||
// There is no overlap between these relatively aligned accesses of similar
|
||||
// size, return no alias.
|
||||
if ((OffAlign1 + Size1) <= OffAlign2 || (OffAlign2 + Size2) <= OffAlign1)
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
if (CombinerGlobalAA) {
|
||||
// Use alias analysis information.
|
||||
int64_t MinOffset = std::min(SrcValueOffset1, SrcValueOffset2);
|
||||
|
@ -7166,7 +7166,7 @@ bool DAGCombiner::isAlias(SDValue Ptr1, int64_t Size1,
|
|||
/// node. Returns true if the operand was a load.
|
||||
bool DAGCombiner::FindAliasInfo(SDNode *N,
|
||||
SDValue &Ptr, int64_t &Size,
|
||||
const Value *&SrcValue,
|
||||
const Value *&SrcValue,
|
||||
int &SrcValueOffset,
|
||||
unsigned &SrcValueAlign,
|
||||
const MDNode *&TBAAInfo) const {
|
||||
|
@ -7206,26 +7206,26 @@ void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain,
|
|||
int SrcValueOffset;
|
||||
unsigned SrcValueAlign;
|
||||
const MDNode *SrcTBAAInfo;
|
||||
bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset,
|
||||
bool IsLoad = FindAliasInfo(N, Ptr, Size, SrcValue, SrcValueOffset,
|
||||
SrcValueAlign, SrcTBAAInfo);
|
||||
|
||||
// Starting off.
|
||||
Chains.push_back(OriginalChain);
|
||||
unsigned Depth = 0;
|
||||
|
||||
|
||||
// Look at each chain and determine if it is an alias. If so, add it to the
|
||||
// aliases list. If not, then continue up the chain looking for the next
|
||||
// candidate.
|
||||
while (!Chains.empty()) {
|
||||
SDValue Chain = Chains.back();
|
||||
Chains.pop_back();
|
||||
|
||||
// For TokenFactor nodes, look at each operand and only continue up the
|
||||
// chain until we find two aliases. If we've seen two aliases, assume we'll
|
||||
|
||||
// For TokenFactor nodes, look at each operand and only continue up the
|
||||
// chain until we find two aliases. If we've seen two aliases, assume we'll
|
||||
// find more and revert to original chain since the xform is unlikely to be
|
||||
// profitable.
|
||||
//
|
||||
// FIXME: The depth check could be made to return the last non-aliasing
|
||||
//
|
||||
// FIXME: The depth check could be made to return the last non-aliasing
|
||||
// chain we found before we hit a tokenfactor rather than the original
|
||||
// chain.
|
||||
if (Depth > 6 || Aliases.size() == 2) {
|
||||
|
@ -7309,9 +7309,9 @@ SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
|
|||
// If a single operand then chain to it. We don't need to revisit it.
|
||||
return Aliases[0];
|
||||
}
|
||||
|
||||
|
||||
// Construct a custom tailored token factor.
|
||||
return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
|
||||
return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
|
||||
&Aliases[0], Aliases.size());
|
||||
}
|
||||
|
||||
|
|
|
@ -197,12 +197,12 @@ unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
|
|||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
||||
TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
|
||||
}
|
||||
|
||||
|
||||
// If target-independent code couldn't handle the value, give target-specific
|
||||
// code a try.
|
||||
if (!Reg && isa<Constant>(V))
|
||||
Reg = TargetMaterializeConstant(cast<Constant>(V));
|
||||
|
||||
|
||||
// Don't cache constant materializations in the general ValueMap.
|
||||
// To do so would require tracking what uses they dominate.
|
||||
if (Reg != 0) {
|
||||
|
@ -234,7 +234,7 @@ unsigned FastISel::UpdateValueMap(const Value *I, unsigned Reg) {
|
|||
LocalValueMap[I] = Reg;
|
||||
return Reg;
|
||||
}
|
||||
|
||||
|
||||
unsigned &AssignedReg = FuncInfo.ValueMap[I];
|
||||
if (AssignedReg == 0)
|
||||
// Use the new register.
|
||||
|
@ -414,7 +414,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
|
|||
// If this is a constant subscript, handle it quickly.
|
||||
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
|
||||
if (CI->isZero()) continue;
|
||||
uint64_t Offs =
|
||||
uint64_t Offs =
|
||||
TD.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
|
||||
N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, Offs, VT);
|
||||
if (N == 0)
|
||||
|
@ -423,7 +423,7 @@ bool FastISel::SelectGetElementPtr(const User *I) {
|
|||
NIsKill = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
// N = N + Idx * ElementSize;
|
||||
uint64_t ElementSize = TD.getTypeAllocSize(Ty);
|
||||
std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
|
||||
|
@ -479,13 +479,13 @@ bool FastISel::SelectCall(const User *I) {
|
|||
Offset = FuncInfo.getByValArgumentFrameIndex(Arg);
|
||||
if (Offset)
|
||||
Reg = TRI.getFrameRegister(*FuncInfo.MF);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!Reg)
|
||||
Reg = getRegForValue(Address);
|
||||
|
||||
|
||||
if (Reg)
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
||||
TII.get(TargetOpcode::DBG_VALUE))
|
||||
.addReg(Reg, RegState::Debug).addImm(Offset)
|
||||
.addMetadata(DI->getVariable());
|
||||
|
@ -521,7 +521,7 @@ bool FastISel::SelectCall(const User *I) {
|
|||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II)
|
||||
.addReg(0U).addImm(DI->getOffset())
|
||||
.addMetadata(DI->getVariable());
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
case Intrinsic::eh_exception: {
|
||||
|
@ -594,12 +594,12 @@ bool FastISel::SelectCall(const User *I) {
|
|||
bool FastISel::SelectCast(const User *I, unsigned Opcode) {
|
||||
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
|
||||
EVT DstVT = TLI.getValueType(I->getType());
|
||||
|
||||
|
||||
if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
|
||||
DstVT == MVT::Other || !DstVT.isSimple())
|
||||
// Unhandled type. Halt "fast" selection and bail.
|
||||
return false;
|
||||
|
||||
|
||||
// Check if the destination type is legal. Or as a special case,
|
||||
// it may be i1 if we're doing a truncate because that's
|
||||
// easy and somewhat common.
|
||||
|
@ -641,7 +641,7 @@ bool FastISel::SelectCast(const User *I, unsigned Opcode) {
|
|||
InputReg, InputRegIsKill);
|
||||
if (!ResultReg)
|
||||
return false;
|
||||
|
||||
|
||||
UpdateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
@@ -656,23 +656,23 @@ bool FastISel::SelectBitCast(const User *I) {
     return true;
   }

-  // Bitcasts of other values become reg-reg copies or BIT_CONVERT operators.
+  // Bitcasts of other values become reg-reg copies or BITCAST operators.
   EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
   EVT DstVT = TLI.getValueType(I->getType());

   if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
       DstVT == MVT::Other || !DstVT.isSimple() ||
       !TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
     // Unhandled type. Halt "fast" selection and bail.
     return false;

   unsigned Op0 = getRegForValue(I->getOperand(0));
   if (Op0 == 0)
     // Unhandled operand. Halt "fast" selection and bail.
     return false;

   bool Op0IsKill = hasTrivialKill(I->getOperand(0));

   // First, try to perform the bitcast by inserting a reg-reg copy.
   unsigned ResultReg = 0;
   if (SrcVT.getSimpleVT() == DstVT.getSimpleVT()) {
@@ -685,15 +685,15 @@ bool FastISel::SelectBitCast(const User *I) {
                             ResultReg).addReg(Op0);
     }
   }

-  // If the reg-reg copy failed, select a BIT_CONVERT opcode.
+  // If the reg-reg copy failed, select a BITCAST opcode.
   if (!ResultReg)
     ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
-                           ISD::BIT_CONVERT, Op0, Op0IsKill);
+                           ISD::BITCAST, Op0, Op0IsKill);

   if (!ResultReg)
     return false;

   UpdateValueMap(I, ResultReg);
   return true;
 }

@@ -765,7 +765,7 @@ FastISel::SelectFNeg(const User *I) {
     return false;

   unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
-                               ISD::BIT_CONVERT, OpReg, OpRegIsKill);
+                               ISD::BITCAST, OpReg, OpRegIsKill);
   if (IntReg == 0)
     return false;

@@ -777,7 +777,7 @@ FastISel::SelectFNeg(const User *I) {
     return false;

   ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
-                         ISD::BIT_CONVERT, IntResultReg, /*Kill=*/true);
+                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
   if (ResultReg == 0)
     return false;

@ -857,10 +857,10 @@ FastISel::SelectOperator(const User *I, unsigned Opcode) {
|
|||
|
||||
// Dynamic-sized alloca is not handled yet.
|
||||
return false;
|
||||
|
||||
|
||||
case Instruction::Call:
|
||||
return SelectCall(I);
|
||||
|
||||
|
||||
case Instruction::BitCast:
|
||||
return SelectBitCast(I);
|
||||
|
||||
|
@ -923,7 +923,7 @@ unsigned FastISel::FastEmit_r(MVT, MVT,
|
|||
return 0;
|
||||
}
|
||||
|
||||
unsigned FastISel::FastEmit_rr(MVT, MVT,
|
||||
unsigned FastISel::FastEmit_rr(MVT, MVT,
|
||||
unsigned,
|
||||
unsigned /*Op0*/, bool /*Op0IsKill*/,
|
||||
unsigned /*Op1*/, bool /*Op1IsKill*/) {
|
||||
|
@ -1151,7 +1151,7 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
|
|||
uint64_t Imm) {
|
||||
unsigned ResultReg = createResultReg(RC);
|
||||
const TargetInstrDesc &II = TII.get(MachineInstOpcode);
|
||||
|
||||
|
||||
if (II.getNumDefs() >= 1)
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, II, ResultReg).addImm(Imm);
|
||||
else {
|
||||
|
|
|
@ -403,7 +403,7 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
|
|||
// Expand to a bitconvert of the value to the integer type of the
|
||||
// same size, then a (misaligned) int store.
|
||||
// FIXME: Does not handle truncating floating point stores!
|
||||
SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, intVT, Val);
|
||||
SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val);
|
||||
return DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(),
|
||||
ST->isVolatile(), ST->isNonTemporal(), Alignment);
|
||||
} else {
|
||||
|
@ -515,14 +515,14 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
|
|||
SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, LD->getPointerInfo(),
|
||||
LD->isVolatile(),
|
||||
LD->isNonTemporal(), LD->getAlignment());
|
||||
SDValue Result = DAG.getNode(ISD::BIT_CONVERT, dl, LoadedVT, newLoad);
|
||||
SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad);
|
||||
if (VT.isFloatingPoint() && LoadedVT != VT)
|
||||
Result = DAG.getNode(ISD::FP_EXTEND, dl, VT, Result);
|
||||
|
||||
SDValue Ops[] = { Result, Chain };
|
||||
return DAG.getMergeValues(Ops, 2, dl);
|
||||
}
|
||||
|
||||
|
||||
// Copy the value to a (aligned) stack slot using (unaligned) integer
|
||||
// loads and stores, then do a (aligned) load from the stack slot.
|
||||
EVT RegVT = TLI.getRegisterType(*DAG.getContext(), intVT);
|
||||
|
@ -733,7 +733,7 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
|
|||
return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
|
||||
isVolatile, isNonTemporal, Alignment);
|
||||
}
|
||||
|
||||
|
||||
if (CFP->getValueType(0) == MVT::f64) {
|
||||
// If this target supports 64-bit registers, do a single 64-bit store.
|
||||
if (getTypeAction(MVT::i64) == Legal) {
|
||||
|
@ -742,7 +742,7 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
|
|||
return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getPointerInfo(),
|
||||
isVolatile, isNonTemporal, Alignment);
|
||||
}
|
||||
|
||||
|
||||
if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
|
||||
// Otherwise, if the target supports 32-bit registers, use 2 32-bit
|
||||
// stores. If the target supports neither 32- nor 64-bits, this
|
||||
|
@ -1145,7 +1145,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
|
|||
Tmp1 = DAG.getLoad(NVT, dl, Tmp1, Tmp2, LD->getPointerInfo(),
|
||||
LD->isVolatile(), LD->isNonTemporal(),
|
||||
LD->getAlignment());
|
||||
Tmp3 = LegalizeOp(DAG.getNode(ISD::BIT_CONVERT, dl, VT, Tmp1));
|
||||
Tmp3 = LegalizeOp(DAG.getNode(ISD::BITCAST, dl, VT, Tmp1));
|
||||
Tmp4 = LegalizeOp(Tmp1.getValue(1));
|
||||
break;
|
||||
}
|
||||
|
@ -1156,7 +1156,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
|
|||
AddLegalizedOperand(SDValue(Node, 1), Tmp4);
|
||||
return Op.getResNo() ? Tmp4 : Tmp3;
|
||||
}
|
||||
|
||||
|
||||
EVT SrcVT = LD->getMemoryVT();
|
||||
unsigned SrcWidth = SrcVT.getSizeInBits();
|
||||
unsigned Alignment = LD->getAlignment();
|
||||
|
@ -1410,7 +1410,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
|
|||
break;
|
||||
case TargetLowering::Promote:
|
||||
assert(VT.isVector() && "Unknown legal promote case!");
|
||||
Tmp3 = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
Tmp3 = DAG.getNode(ISD::BITCAST, dl,
|
||||
TLI.getTypeToPromoteTo(ISD::STORE, VT), Tmp3);
|
||||
Result = DAG.getStore(Tmp1, dl, Tmp3, Tmp2,
|
||||
ST->getPointerInfo(), isVolatile,
|
||||
|
@ -1629,7 +1629,7 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
|
|||
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), FloatVT.getSizeInBits());
|
||||
if (isTypeLegal(IVT)) {
|
||||
// Convert to an integer with the same sign bit.
|
||||
SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2);
|
||||
SignBit = DAG.getNode(ISD::BITCAST, dl, IVT, Tmp2);
|
||||
} else {
|
||||
// Store the float to memory, then load the sign part out as an integer.
|
||||
MVT LoadTy = TLI.getPointerTy();
|
||||
|
@ -2120,8 +2120,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
|
|||
DAG.getConstant(32, MVT::i64));
|
||||
SDValue LoOr = DAG.getNode(ISD::OR, dl, MVT::i64, Lo, TwoP52);
|
||||
SDValue HiOr = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, TwoP84);
|
||||
SDValue LoFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, LoOr);
|
||||
SDValue HiFlt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, HiOr);
|
||||
SDValue LoFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, LoOr);
|
||||
SDValue HiFlt = DAG.getNode(ISD::BITCAST, dl, MVT::f64, HiOr);
|
||||
SDValue HiSub = DAG.getNode(ISD::FSUB, dl, MVT::f64, HiFlt,
|
||||
TwoP84PlusTwoP52);
|
||||
return DAG.getNode(ISD::FADD, dl, MVT::f64, LoFlt, HiSub);
|
||||
|
@ -2134,28 +2134,28 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
|
|||
// algorithm from the x86_64 __floatundidf in compiler_rt.
|
||||
if (!isSigned) {
|
||||
SDValue Fast = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Op0);
|
||||
|
||||
|
||||
SDValue ShiftConst = DAG.getConstant(1, TLI.getShiftAmountTy());
|
||||
SDValue Shr = DAG.getNode(ISD::SRL, dl, MVT::i64, Op0, ShiftConst);
|
||||
SDValue AndConst = DAG.getConstant(1, MVT::i64);
|
||||
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0, AndConst);
|
||||
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And, Shr);
|
||||
|
||||
|
||||
SDValue SignCvt = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Or);
|
||||
SDValue Slow = DAG.getNode(ISD::FADD, dl, MVT::f32, SignCvt, SignCvt);
|
||||
|
||||
|
||||
// TODO: This really should be implemented using a branch rather than a
|
||||
// select. We happen to get lucky and machinesink does the right
|
||||
// thing most of the time. This would be a good candidate for a
|
||||
// select. We happen to get lucky and machinesink does the right
|
||||
// thing most of the time. This would be a good candidate for a
|
||||
//pseudo-op, or, even better, for whole-function isel.
|
||||
SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
|
||||
SDValue SignBitTest = DAG.getSetCC(dl, TLI.getSetCCResultType(MVT::i64),
|
||||
Op0, DAG.getConstant(0, MVT::i64), ISD::SETLT);
|
||||
return DAG.getNode(ISD::SELECT, dl, MVT::f32, SignBitTest, Slow, Fast);
|
||||
}
|
||||
|
||||
|
||||
// Otherwise, implement the fully general conversion.
|
||||
EVT SHVT = TLI.getShiftAmountTy();
|
||||
|
||||
|
||||
SDValue And = DAG.getNode(ISD::AND, dl, MVT::i64, Op0,
|
||||
DAG.getConstant(UINT64_C(0xfffffffffffff800), MVT::i64));
|
||||
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i64, And,
|
||||
|
@ -2169,7 +2169,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
|
|||
Op0, DAG.getConstant(UINT64_C(0x0020000000000000), MVT::i64),
|
||||
ISD::SETUGE);
|
||||
SDValue Sel2 = DAG.getNode(ISD::SELECT, dl, MVT::i64, Ge, Sel, Op0);
|
||||
|
||||
|
||||
SDValue Sh = DAG.getNode(ISD::SRL, dl, MVT::i64, Sel2,
|
||||
DAG.getConstant(32, SHVT));
|
||||
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Sh);
|
||||
|
@ -2617,7 +2617,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
|
|||
break;
|
||||
}
|
||||
case ISD::FP_ROUND:
|
||||
case ISD::BIT_CONVERT:
|
||||
case ISD::BITCAST:
|
||||
Tmp1 = EmitStackConvert(Node->getOperand(0), Node->getValueType(0),
|
||||
Node->getValueType(0), dl);
|
||||
Results.push_back(Tmp1);
|
||||
|
@ -2739,7 +2739,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
|
|||
case ISD::EXTRACT_VECTOR_ELT:
|
||||
if (Node->getOperand(0).getValueType().getVectorNumElements() == 1)
|
||||
// This must be an access of the only element. Return it.
|
||||
Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, Node->getValueType(0),
|
||||
Tmp1 = DAG.getNode(ISD::BITCAST, dl, Node->getValueType(0),
|
||||
Node->getOperand(0));
|
||||
else
|
||||
Tmp1 = ExpandExtractFromVectorThroughStack(SDValue(Node, 0));
|
||||
|
@ -3361,8 +3361,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
|
|||
case ISD::XOR: {
unsigned ExtOp, TruncOp;
if (OVT.isVector()) {
ExtOp = ISD::BIT_CONVERT;
TruncOp = ISD::BIT_CONVERT;
ExtOp = ISD::BITCAST;
TruncOp = ISD::BITCAST;
} else {
assert(OVT.isInteger() && "Cannot promote logic operation");
ExtOp = ISD::ANY_EXTEND;
@@ -3379,8 +3379,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
case ISD::SELECT: {
unsigned ExtOp, TruncOp;
if (Node->getValueType(0).isVector()) {
ExtOp = ISD::BIT_CONVERT;
TruncOp = ISD::BIT_CONVERT;
ExtOp = ISD::BITCAST;
TruncOp = ISD::BITCAST;
} else if (Node->getValueType(0).isInteger()) {
ExtOp = ISD::ANY_EXTEND;
TruncOp = ISD::TRUNCATE;
@@ -3407,12 +3407,12 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node,
cast<ShuffleVectorSDNode>(Node)->getMask(Mask);

// Cast the two input vectors.
Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Node->getOperand(1));
Tmp1 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(0));
Tmp2 = DAG.getNode(ISD::BITCAST, dl, NVT, Node->getOperand(1));

// Convert the shuffle mask to the right # elements.
Tmp1 = ShuffleWithNarrowerEltType(NVT, OVT, dl, Tmp1, Tmp2, Mask);
Tmp1 = DAG.getNode(ISD::BIT_CONVERT, dl, OVT, Tmp1);
Tmp1 = DAG.getNode(ISD::BITCAST, dl, OVT, Tmp1);
Results.push_back(Tmp1);
break;
}

@@ -55,7 +55,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
#endif
llvm_unreachable("Do not know how to soften the result of this operator!");

case ISD::BIT_CONVERT: R = SoftenFloatRes_BIT_CONVERT(N); break;
case ISD::BITCAST: R = SoftenFloatRes_BITCAST(N); break;
case ISD::BUILD_PAIR: R = SoftenFloatRes_BUILD_PAIR(N); break;
case ISD::ConstantFP:
R = SoftenFloatRes_ConstantFP(cast<ConstantFPSDNode>(N));
@@ -102,7 +102,7 @@ void DAGTypeLegalizer::SoftenFloatResult(SDNode *N, unsigned ResNo) {
SetSoftenedFloat(SDValue(N, ResNo), R);
}

SDValue DAGTypeLegalizer::SoftenFloatRes_BIT_CONVERT(SDNode *N) {
SDValue DAGTypeLegalizer::SoftenFloatRes_BITCAST(SDNode *N) {
return BitConvertToInteger(N->getOperand(0));
}

@@ -557,7 +557,7 @@ bool DAGTypeLegalizer::SoftenFloatOperand(SDNode *N, unsigned OpNo) {
#endif
llvm_unreachable("Do not know how to soften this operator's operand!");

case ISD::BIT_CONVERT: Res = SoftenFloatOp_BIT_CONVERT(N); break;
case ISD::BITCAST: Res = SoftenFloatOp_BITCAST(N); break;
case ISD::BR_CC: Res = SoftenFloatOp_BR_CC(N); break;
case ISD::FP_ROUND: Res = SoftenFloatOp_FP_ROUND(N); break;
case ISD::FP_TO_SINT: Res = SoftenFloatOp_FP_TO_SINT(N); break;
@@ -669,8 +669,8 @@ void DAGTypeLegalizer::SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
}
}

SDValue DAGTypeLegalizer::SoftenFloatOp_BIT_CONVERT(SDNode *N) {
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), N->getValueType(0),
SDValue DAGTypeLegalizer::SoftenFloatOp_BITCAST(SDNode *N) {
return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getValueType(0),
GetSoftenedFloat(N->getOperand(0)));
}

@@ -815,7 +815,7 @@ void DAGTypeLegalizer::ExpandFloatResult(SDNode *N, unsigned ResNo) {
case ISD::SELECT: SplitRes_SELECT(N, Lo, Hi); break;
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;

case ISD::BIT_CONVERT: ExpandRes_BIT_CONVERT(N, Lo, Hi); break;
case ISD::BITCAST: ExpandRes_BITCAST(N, Lo, Hi); break;
case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
@@ -1220,7 +1220,7 @@ bool DAGTypeLegalizer::ExpandFloatOperand(SDNode *N, unsigned OpNo) {
#endif
llvm_unreachable("Do not know how to expand this operator's operand!");

case ISD::BIT_CONVERT: Res = ExpandOp_BIT_CONVERT(N); break;
case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;

@@ -49,7 +49,7 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
llvm_unreachable("Do not know how to promote this operator!");
|
||||
case ISD::AssertSext: Res = PromoteIntRes_AssertSext(N); break;
|
||||
case ISD::AssertZext: Res = PromoteIntRes_AssertZext(N); break;
|
||||
case ISD::BIT_CONVERT: Res = PromoteIntRes_BIT_CONVERT(N); break;
|
||||
case ISD::BITCAST: Res = PromoteIntRes_BITCAST(N); break;
|
||||
case ISD::BSWAP: Res = PromoteIntRes_BSWAP(N); break;
|
||||
case ISD::BUILD_PAIR: Res = PromoteIntRes_BUILD_PAIR(N); break;
|
||||
case ISD::Constant: Res = PromoteIntRes_Constant(N); break;
|
||||
|
@ -162,7 +162,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Atomic2(AtomicSDNode *N) {
|
|||
return Res;
|
||||
}
|
||||
|
||||
SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
|
||||
SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) {
|
||||
SDValue InOp = N->getOperand(0);
|
||||
EVT InVT = InOp.getValueType();
|
||||
EVT NInVT = TLI.getTypeToTransformTo(*DAG.getContext(), InVT);
|
||||
|
@ -179,8 +179,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
|
|||
case PromoteInteger:
|
||||
if (NOutVT.bitsEq(NInVT))
|
||||
// The input promotes to the same size. Convert the promoted value.
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
NOutVT, GetPromotedInteger(InOp));
|
||||
return DAG.getNode(ISD::BITCAST, dl, NOutVT, GetPromotedInteger(InOp));
|
||||
break;
|
||||
case SoftenFloat:
|
||||
// Promote the integer operand by hand.
|
||||
|
@ -193,7 +192,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
|
|||
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
|
||||
BitConvertToInteger(GetScalarizedVector(InOp)));
|
||||
case SplitVector: {
|
||||
// For example, i32 = BIT_CONVERT v2i16 on alpha. Convert the split
|
||||
// For example, i32 = BITCAST v2i16 on alpha. Convert the split
|
||||
// pieces of the input into integers and reassemble in the final type.
|
||||
SDValue Lo, Hi;
|
||||
GetSplitVector(N->getOperand(0), Lo, Hi);
|
||||
|
@ -207,12 +206,12 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BIT_CONVERT(SDNode *N) {
|
|||
EVT::getIntegerVT(*DAG.getContext(),
|
||||
NOutVT.getSizeInBits()),
|
||||
JoinIntegers(Lo, Hi));
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, InOp);
|
||||
return DAG.getNode(ISD::BITCAST, dl, NOutVT, InOp);
|
||||
}
|
||||
case WidenVector:
|
||||
if (OutVT.bitsEq(NInVT))
|
||||
// The input is widened to the same size. Convert to the widened value.
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, OutVT, GetWidenedVector(InOp));
|
||||
return DAG.getNode(ISD::BITCAST, dl, OutVT, GetWidenedVector(InOp));
|
||||
}
|
||||
|
||||
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT,
|
||||
|
@ -631,7 +630,7 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
|
|||
llvm_unreachable("Do not know how to promote this operator's operand!");
|
||||
|
||||
case ISD::ANY_EXTEND: Res = PromoteIntOp_ANY_EXTEND(N); break;
|
||||
case ISD::BIT_CONVERT: Res = PromoteIntOp_BIT_CONVERT(N); break;
|
||||
case ISD::BITCAST: Res = PromoteIntOp_BITCAST(N); break;
|
||||
case ISD::BR_CC: Res = PromoteIntOp_BR_CC(N, OpNo); break;
|
||||
case ISD::BRCOND: Res = PromoteIntOp_BRCOND(N, OpNo); break;
|
||||
case ISD::BUILD_PAIR: Res = PromoteIntOp_BUILD_PAIR(N); break;
|
||||
|
@ -713,7 +712,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_ANY_EXTEND(SDNode *N) {
|
|||
return DAG.getNode(ISD::ANY_EXTEND, N->getDebugLoc(), N->getValueType(0), Op);
|
||||
}
|
||||
|
||||
SDValue DAGTypeLegalizer::PromoteIntOp_BIT_CONVERT(SDNode *N) {
|
||||
SDValue DAGTypeLegalizer::PromoteIntOp_BITCAST(SDNode *N) {
|
||||
// This should only occur in unusual situations like bitcasting to an
|
||||
// x86_fp80, so just turn it into a store+load
|
||||
return CreateStackStoreLoad(N->getOperand(0), N->getValueType(0));
|
||||
|
@ -950,7 +949,7 @@ void DAGTypeLegalizer::ExpandIntegerResult(SDNode *N, unsigned ResNo) {
|
|||
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
|
||||
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
|
||||
|
||||
case ISD::BIT_CONVERT: ExpandRes_BIT_CONVERT(N, Lo, Hi); break;
|
||||
case ISD::BITCAST: ExpandRes_BITCAST(N, Lo, Hi); break;
|
||||
case ISD::BUILD_PAIR: ExpandRes_BUILD_PAIR(N, Lo, Hi); break;
|
||||
case ISD::EXTRACT_ELEMENT: ExpandRes_EXTRACT_ELEMENT(N, Lo, Hi); break;
|
||||
case ISD::EXTRACT_VECTOR_ELT: ExpandRes_EXTRACT_VECTOR_ELT(N, Lo, Hi); break;
|
||||
|
@ -2076,7 +2075,7 @@ bool DAGTypeLegalizer::ExpandIntegerOperand(SDNode *N, unsigned OpNo) {
|
|||
#endif
|
||||
llvm_unreachable("Do not know how to expand this operator's operand!");
|
||||
|
||||
case ISD::BIT_CONVERT: Res = ExpandOp_BIT_CONVERT(N); break;
|
||||
case ISD::BITCAST: Res = ExpandOp_BITCAST(N); break;
|
||||
case ISD::BR_CC: Res = ExpandIntOp_BR_CC(N); break;
|
||||
case ISD::BUILD_VECTOR: Res = ExpandOp_BUILD_VECTOR(N); break;
|
||||
case ISD::EXTRACT_ELEMENT: Res = ExpandOp_EXTRACT_ELEMENT(N); break;
|
||||
|
@ -2320,7 +2319,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
|
|||
N->getMemoryVT(), isVolatile, isNonTemporal,
|
||||
Alignment);
|
||||
}
|
||||
|
||||
|
||||
if (TLI.isLittleEndian()) {
|
||||
// Little-endian - low bits are at low addresses.
|
||||
GetExpandedInteger(N->getValue(), Lo, Hi);
|
||||
|
|
|
@ -858,7 +858,7 @@ void DAGTypeLegalizer::SetWidenedVector(SDValue Op, SDValue Result) {
|
|||
/// BitConvertToInteger - Convert to an integer of the same size.
|
||||
SDValue DAGTypeLegalizer::BitConvertToInteger(SDValue Op) {
|
||||
unsigned BitWidth = Op.getValueType().getSizeInBits();
|
||||
return DAG.getNode(ISD::BIT_CONVERT, Op.getDebugLoc(),
|
||||
return DAG.getNode(ISD::BITCAST, Op.getDebugLoc(),
|
||||
EVT::getIntegerVT(*DAG.getContext(), BitWidth), Op);
|
||||
}
|
||||
|
||||
|
@ -869,7 +869,7 @@ SDValue DAGTypeLegalizer::BitConvertVectorToIntegerVector(SDValue Op) {
|
|||
unsigned EltWidth = Op.getValueType().getVectorElementType().getSizeInBits();
|
||||
EVT EltNVT = EVT::getIntegerVT(*DAG.getContext(), EltWidth);
|
||||
unsigned NumElts = Op.getValueType().getVectorNumElements();
|
||||
return DAG.getNode(ISD::BIT_CONVERT, Op.getDebugLoc(),
|
||||
return DAG.getNode(ISD::BITCAST, Op.getDebugLoc(),
|
||||
EVT::getVectorVT(*DAG.getContext(), EltNVT, NumElts), Op);
|
||||
}
|
||||
|
||||
|
|
|
@ -99,7 +99,7 @@ private:
|
|||
return SoftenFloat;
|
||||
return ExpandFloat;
|
||||
}
|
||||
|
||||
|
||||
if (VT.getVectorNumElements() == 1)
|
||||
return ScalarizeVector;
|
||||
return SplitVector;
|
||||
|
@ -244,7 +244,7 @@ private:
|
|||
SDValue PromoteIntRes_AssertZext(SDNode *N);
|
||||
SDValue PromoteIntRes_Atomic1(AtomicSDNode *N);
|
||||
SDValue PromoteIntRes_Atomic2(AtomicSDNode *N);
|
||||
SDValue PromoteIntRes_BIT_CONVERT(SDNode *N);
|
||||
SDValue PromoteIntRes_BITCAST(SDNode *N);
|
||||
SDValue PromoteIntRes_BSWAP(SDNode *N);
|
||||
SDValue PromoteIntRes_BUILD_PAIR(SDNode *N);
|
||||
SDValue PromoteIntRes_Constant(SDNode *N);
|
||||
|
@ -278,7 +278,7 @@ private:
|
|||
// Integer Operand Promotion.
|
||||
bool PromoteIntegerOperand(SDNode *N, unsigned OperandNo);
|
||||
SDValue PromoteIntOp_ANY_EXTEND(SDNode *N);
|
||||
SDValue PromoteIntOp_BIT_CONVERT(SDNode *N);
|
||||
SDValue PromoteIntOp_BITCAST(SDNode *N);
|
||||
SDValue PromoteIntOp_BUILD_PAIR(SDNode *N);
|
||||
SDValue PromoteIntOp_BR_CC(SDNode *N, unsigned OpNo);
|
||||
SDValue PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo);
|
||||
|
@ -352,7 +352,7 @@ private:
|
|||
|
||||
// Integer Operand Expansion.
|
||||
bool ExpandIntegerOperand(SDNode *N, unsigned OperandNo);
|
||||
SDValue ExpandIntOp_BIT_CONVERT(SDNode *N);
|
||||
SDValue ExpandIntOp_BITCAST(SDNode *N);
|
||||
SDValue ExpandIntOp_BR_CC(SDNode *N);
|
||||
SDValue ExpandIntOp_BUILD_VECTOR(SDNode *N);
|
||||
SDValue ExpandIntOp_EXTRACT_ELEMENT(SDNode *N);
|
||||
|
@ -387,7 +387,7 @@ private:
|
|||
|
||||
// Result Float to Integer Conversion.
|
||||
void SoftenFloatResult(SDNode *N, unsigned OpNo);
|
||||
SDValue SoftenFloatRes_BIT_CONVERT(SDNode *N);
|
||||
SDValue SoftenFloatRes_BITCAST(SDNode *N);
|
||||
SDValue SoftenFloatRes_BUILD_PAIR(SDNode *N);
|
||||
SDValue SoftenFloatRes_ConstantFP(ConstantFPSDNode *N);
|
||||
SDValue SoftenFloatRes_EXTRACT_VECTOR_ELT(SDNode *N);
|
||||
|
@ -426,7 +426,7 @@ private:
|
|||
|
||||
// Operand Float to Integer Conversion.
|
||||
bool SoftenFloatOperand(SDNode *N, unsigned OpNo);
|
||||
SDValue SoftenFloatOp_BIT_CONVERT(SDNode *N);
|
||||
SDValue SoftenFloatOp_BITCAST(SDNode *N);
|
||||
SDValue SoftenFloatOp_BR_CC(SDNode *N);
|
||||
SDValue SoftenFloatOp_FP_ROUND(SDNode *N);
|
||||
SDValue SoftenFloatOp_FP_TO_SINT(SDNode *N);
|
||||
|
@ -515,7 +515,7 @@ private:
|
|||
SDValue ScalarizeVecRes_UnaryOp(SDNode *N);
|
||||
SDValue ScalarizeVecRes_InregOp(SDNode *N);
|
||||
|
||||
SDValue ScalarizeVecRes_BIT_CONVERT(SDNode *N);
|
||||
SDValue ScalarizeVecRes_BITCAST(SDNode *N);
|
||||
SDValue ScalarizeVecRes_CONVERT_RNDSAT(SDNode *N);
|
||||
SDValue ScalarizeVecRes_EXTRACT_SUBVECTOR(SDNode *N);
|
||||
SDValue ScalarizeVecRes_FPOWI(SDNode *N);
|
||||
|
@ -532,7 +532,7 @@ private:
|
|||
|
||||
// Vector Operand Scalarization: <1 x ty> -> ty.
|
||||
bool ScalarizeVectorOperand(SDNode *N, unsigned OpNo);
|
||||
SDValue ScalarizeVecOp_BIT_CONVERT(SDNode *N);
|
||||
SDValue ScalarizeVecOp_BITCAST(SDNode *N);
|
||||
SDValue ScalarizeVecOp_CONCAT_VECTORS(SDNode *N);
|
||||
SDValue ScalarizeVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
|
||||
SDValue ScalarizeVecOp_STORE(StoreSDNode *N, unsigned OpNo);
|
||||
|
@ -557,7 +557,7 @@ private:
|
|||
void SplitVecRes_UnaryOp(SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
void SplitVecRes_InregOp(SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
|
||||
void SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
void SplitVecRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
void SplitVecRes_BUILD_PAIR(SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
void SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
void SplitVecRes_CONCAT_VECTORS(SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
|
@ -577,7 +577,7 @@ private:
|
|||
bool SplitVectorOperand(SDNode *N, unsigned OpNo);
|
||||
SDValue SplitVecOp_UnaryOp(SDNode *N);
|
||||
|
||||
SDValue SplitVecOp_BIT_CONVERT(SDNode *N);
|
||||
SDValue SplitVecOp_BITCAST(SDNode *N);
|
||||
SDValue SplitVecOp_EXTRACT_SUBVECTOR(SDNode *N);
|
||||
SDValue SplitVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
|
||||
SDValue SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo);
|
||||
|
@ -603,7 +603,7 @@ private:
|
|||
|
||||
// Widen Vector Result Promotion.
|
||||
void WidenVectorResult(SDNode *N, unsigned ResNo);
|
||||
SDValue WidenVecRes_BIT_CONVERT(SDNode* N);
|
||||
SDValue WidenVecRes_BITCAST(SDNode* N);
|
||||
SDValue WidenVecRes_BUILD_VECTOR(SDNode* N);
|
||||
SDValue WidenVecRes_CONCAT_VECTORS(SDNode* N);
|
||||
SDValue WidenVecRes_CONVERT_RNDSAT(SDNode* N);
|
||||
|
@ -628,7 +628,7 @@ private:
|
|||
|
||||
// Widen Vector Operand.
|
||||
bool WidenVectorOperand(SDNode *N, unsigned ResNo);
|
||||
SDValue WidenVecOp_BIT_CONVERT(SDNode *N);
|
||||
SDValue WidenVecOp_BITCAST(SDNode *N);
|
||||
SDValue WidenVecOp_CONCAT_VECTORS(SDNode *N);
|
||||
SDValue WidenVecOp_EXTRACT_VECTOR_ELT(SDNode *N);
|
||||
SDValue WidenVecOp_EXTRACT_SUBVECTOR(SDNode *N);
|
||||
|
@ -721,7 +721,7 @@ private:
|
|||
}
|
||||
|
||||
// Generic Result Expansion.
|
||||
void ExpandRes_BIT_CONVERT (SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
void ExpandRes_BITCAST (SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
void ExpandRes_BUILD_PAIR (SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
void ExpandRes_EXTRACT_ELEMENT (SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
void ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
|
@ -729,7 +729,7 @@ private:
|
|||
void ExpandRes_VAARG (SDNode *N, SDValue &Lo, SDValue &Hi);
|
||||
|
||||
// Generic Operand Expansion.
|
||||
SDValue ExpandOp_BIT_CONVERT (SDNode *N);
|
||||
SDValue ExpandOp_BITCAST (SDNode *N);
|
||||
SDValue ExpandOp_BUILD_VECTOR (SDNode *N);
|
||||
SDValue ExpandOp_EXTRACT_ELEMENT (SDNode *N);
|
||||
SDValue ExpandOp_INSERT_VECTOR_ELT(SDNode *N);
|
||||
|
|
|
@ -32,8 +32,7 @@ using namespace llvm;
|
|||
// little/big-endian machines, followed by the Hi/Lo part. This means that
|
||||
// they cannot be used as is on vectors, for which Lo is always stored first.
|
||||
|
||||
void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
|
||||
SDValue &Hi) {
|
||||
void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
|
||||
EVT OutVT = N->getValueType(0);
|
||||
EVT NOutVT = TLI.getTypeToTransformTo(*DAG.getContext(), OutVT);
|
||||
SDValue InOp = N->getOperand(0);
|
||||
|
@ -50,31 +49,31 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
|
|||
case SoftenFloat:
|
||||
// Convert the integer operand instead.
|
||||
SplitInteger(GetSoftenedFloat(InOp), Lo, Hi);
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
|
||||
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
|
||||
return;
|
||||
case ExpandInteger:
|
||||
case ExpandFloat:
|
||||
// Convert the expanded pieces of the input.
|
||||
GetExpandedOp(InOp, Lo, Hi);
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
|
||||
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
|
||||
return;
|
||||
case SplitVector:
|
||||
GetSplitVector(InOp, Lo, Hi);
|
||||
if (TLI.isBigEndian())
|
||||
std::swap(Lo, Hi);
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
|
||||
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
|
||||
return;
|
||||
case ScalarizeVector:
|
||||
// Convert the element instead.
|
||||
SplitInteger(BitConvertToInteger(GetScalarizedVector(InOp)), Lo, Hi);
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
|
||||
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
|
||||
return;
|
||||
case WidenVector: {
|
||||
assert(!(InVT.getVectorNumElements() & 1) && "Unsupported BIT_CONVERT");
|
||||
assert(!(InVT.getVectorNumElements() & 1) && "Unsupported BITCAST");
|
||||
InOp = GetWidenedVector(InOp);
|
||||
EVT InNVT = EVT::getVectorVT(*DAG.getContext(), InVT.getVectorElementType(),
|
||||
InVT.getVectorNumElements()/2);
|
||||
|
@ -84,19 +83,19 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
|
|||
DAG.getIntPtrConstant(InNVT.getVectorNumElements()));
|
||||
if (TLI.isBigEndian())
|
||||
std::swap(Lo, Hi);
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, NOutVT, Hi);
|
||||
Lo = DAG.getNode(ISD::BITCAST, dl, NOutVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BITCAST, dl, NOutVT, Hi);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (InVT.isVector() && OutVT.isInteger()) {
|
||||
// Handle cases like i64 = BIT_CONVERT v1i64 on x86, where the operand
|
||||
// Handle cases like i64 = BITCAST v1i64 on x86, where the operand
|
||||
// is legal but the result is not.
|
||||
EVT NVT = EVT::getVectorVT(*DAG.getContext(), NOutVT, 2);
|
||||
|
||||
if (isTypeLegal(NVT)) {
|
||||
SDValue CastInOp = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, InOp);
|
||||
SDValue CastInOp = DAG.getNode(ISD::BITCAST, dl, NVT, InOp);
|
||||
Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
|
||||
DAG.getIntPtrConstant(0));
|
||||
Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
|
||||
|
@ -173,7 +172,7 @@ void DAGTypeLegalizer::ExpandRes_EXTRACT_VECTOR_ELT(SDNode *N, SDValue &Lo,
|
|||
EVT OldVT = N->getValueType(0);
|
||||
EVT NewVT = TLI.getTypeToTransformTo(*DAG.getContext(), OldVT);
|
||||
|
||||
SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
|
||||
EVT::getVectorVT(*DAG.getContext(),
|
||||
NewVT, 2*OldElts),
|
||||
OldVec);
|
||||
|
@ -262,14 +261,14 @@ void DAGTypeLegalizer::ExpandRes_VAARG(SDNode *N, SDValue &Lo, SDValue &Hi) {
|
|||
// Generic Operand Expansion.
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
|
||||
SDValue DAGTypeLegalizer::ExpandOp_BITCAST(SDNode *N) {
|
||||
DebugLoc dl = N->getDebugLoc();
|
||||
if (N->getValueType(0).isVector()) {
|
||||
// An illegal expanding type is being converted to a legal vector type.
|
||||
// Make a two element vector out of the expanded parts and convert that
|
||||
// instead, but only if the new vector type is legal (otherwise there
|
||||
// is no point, and it might create expansion loops). For example, on
|
||||
// x86 this turns v1i64 = BIT_CONVERT i64 into v1i64 = BIT_CONVERT v2i32.
|
||||
// x86 this turns v1i64 = BITCAST i64 into v1i64 = BITCAST v2i32.
|
||||
EVT OVT = N->getOperand(0).getValueType();
|
||||
EVT NVT = EVT::getVectorVT(*DAG.getContext(),
|
||||
TLI.getTypeToTransformTo(*DAG.getContext(), OVT),
|
||||
|
@ -283,7 +282,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BIT_CONVERT(SDNode *N) {
|
|||
std::swap(Parts[0], Parts[1]);
|
||||
|
||||
SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, Parts, 2);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, N->getValueType(0), Vec);
|
||||
return DAG.getNode(ISD::BITCAST, dl, N->getValueType(0), Vec);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -322,7 +321,7 @@ SDValue DAGTypeLegalizer::ExpandOp_BUILD_VECTOR(SDNode *N) {
|
|||
&NewElts[0], NewElts.size());
|
||||
|
||||
// Convert the new vector to the old vector type.
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, NewVec);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VecVT, NewVec);
|
||||
}
|
||||
|
||||
SDValue DAGTypeLegalizer::ExpandOp_EXTRACT_ELEMENT(SDNode *N) {
|
||||
|
@ -347,7 +346,7 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
|
|||
// Bitconvert to a vector of twice the length with elements of the expanded
|
||||
// type, insert the expanded vector elements, and then convert back.
|
||||
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewEVT, NumElts*2);
|
||||
SDValue NewVec = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
SDValue NewVec = DAG.getNode(ISD::BITCAST, dl,
|
||||
NewVecVT, N->getOperand(0));
|
||||
|
||||
SDValue Lo, Hi;
|
||||
|
@ -363,7 +362,7 @@ SDValue DAGTypeLegalizer::ExpandOp_INSERT_VECTOR_ELT(SDNode *N) {
|
|||
NewVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, NewVec, Hi, Idx);
|
||||
|
||||
// Convert the new vector to the old vector type.
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, NewVec);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VecVT, NewVec);
|
||||
}
|
||||
|
||||
SDValue DAGTypeLegalizer::ExpandOp_SCALAR_TO_VECTOR(SDNode *N) {
|
||||
@ -241,14 +241,14 @@ SDValue VectorLegalizer::PromoteVectorOp(SDValue Op) {
|
|||
|
||||
for (unsigned j = 0; j != Op.getNumOperands(); ++j) {
|
||||
if (Op.getOperand(j).getValueType().isVector())
|
||||
Operands[j] = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, Op.getOperand(j));
|
||||
Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
|
||||
else
|
||||
Operands[j] = Op.getOperand(j);
|
||||
}
|
||||
|
||||
Op = DAG.getNode(Op.getOpcode(), dl, NVT, &Operands[0], Operands.size());
|
||||
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Op);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
|
||||
}
|
||||
|
||||
SDValue VectorLegalizer::ExpandFNEG(SDValue Op) {
|
||||
|
|
|
@ -46,7 +46,7 @@ void DAGTypeLegalizer::ScalarizeVectorResult(SDNode *N, unsigned ResNo) {
|
|||
#endif
|
||||
llvm_unreachable("Do not know how to scalarize the result of this operator!");
|
||||
|
||||
case ISD::BIT_CONVERT: R = ScalarizeVecRes_BIT_CONVERT(N); break;
|
||||
case ISD::BITCAST: R = ScalarizeVecRes_BITCAST(N); break;
|
||||
case ISD::BUILD_VECTOR: R = N->getOperand(0); break;
|
||||
case ISD::CONVERT_RNDSAT: R = ScalarizeVecRes_CONVERT_RNDSAT(N); break;
|
||||
case ISD::EXTRACT_SUBVECTOR: R = ScalarizeVecRes_EXTRACT_SUBVECTOR(N); break;
|
||||
|
@ -122,9 +122,9 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_BinOp(SDNode *N) {
|
|||
LHS.getValueType(), LHS, RHS);
|
||||
}
|
||||
|
||||
SDValue DAGTypeLegalizer::ScalarizeVecRes_BIT_CONVERT(SDNode *N) {
|
||||
SDValue DAGTypeLegalizer::ScalarizeVecRes_BITCAST(SDNode *N) {
|
||||
EVT NewVT = N->getValueType(0).getVectorElementType();
|
||||
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
|
||||
return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
|
||||
NewVT, N->getOperand(0));
|
||||
}
|
||||
|
||||
|
@ -296,8 +296,8 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
|
|||
dbgs() << "\n";
|
||||
#endif
|
||||
llvm_unreachable("Do not know how to scalarize this operator's operand!");
|
||||
case ISD::BIT_CONVERT:
|
||||
Res = ScalarizeVecOp_BIT_CONVERT(N);
|
||||
case ISD::BITCAST:
|
||||
Res = ScalarizeVecOp_BITCAST(N);
|
||||
break;
|
||||
case ISD::CONCAT_VECTORS:
|
||||
Res = ScalarizeVecOp_CONCAT_VECTORS(N);
|
||||
|
@ -326,11 +326,11 @@ bool DAGTypeLegalizer::ScalarizeVectorOperand(SDNode *N, unsigned OpNo) {
|
|||
return false;
|
||||
}
|
||||
|
||||
/// ScalarizeVecOp_BIT_CONVERT - If the value to convert is a vector that needs
|
||||
/// ScalarizeVecOp_BITCAST - If the value to convert is a vector that needs
|
||||
/// to be scalarized, it must be <1 x ty>. Convert the element instead.
|
||||
SDValue DAGTypeLegalizer::ScalarizeVecOp_BIT_CONVERT(SDNode *N) {
|
||||
SDValue DAGTypeLegalizer::ScalarizeVecOp_BITCAST(SDNode *N) {
|
||||
SDValue Elt = GetScalarizedVector(N->getOperand(0));
|
||||
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
|
||||
return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
|
||||
N->getValueType(0), Elt);
|
||||
}
|
||||
|
||||
|
@ -406,7 +406,7 @@ void DAGTypeLegalizer::SplitVectorResult(SDNode *N, unsigned ResNo) {
|
|||
case ISD::SELECT_CC: SplitRes_SELECT_CC(N, Lo, Hi); break;
|
||||
case ISD::UNDEF: SplitRes_UNDEF(N, Lo, Hi); break;
|
||||
|
||||
case ISD::BIT_CONVERT: SplitVecRes_BIT_CONVERT(N, Lo, Hi); break;
|
||||
case ISD::BITCAST: SplitVecRes_BITCAST(N, Lo, Hi); break;
|
||||
case ISD::BUILD_VECTOR: SplitVecRes_BUILD_VECTOR(N, Lo, Hi); break;
|
||||
case ISD::CONCAT_VECTORS: SplitVecRes_CONCAT_VECTORS(N, Lo, Hi); break;
|
||||
case ISD::CONVERT_RNDSAT: SplitVecRes_CONVERT_RNDSAT(N, Lo, Hi); break;
|
||||
|
@ -496,8 +496,8 @@ void DAGTypeLegalizer::SplitVecRes_BinOp(SDNode *N, SDValue &Lo,
|
|||
Hi = DAG.getNode(N->getOpcode(), dl, LHSHi.getValueType(), LHSHi, RHSHi);
|
||||
}
|
||||
|
||||
void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
|
||||
SDValue &Hi) {
|
||||
void DAGTypeLegalizer::SplitVecRes_BITCAST(SDNode *N, SDValue &Lo,
|
||||
SDValue &Hi) {
|
||||
// We know the result is a vector. The input may be either a vector or a
|
||||
// scalar value.
|
||||
EVT LoVT, HiVT;
|
||||
|
@ -525,8 +525,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
|
|||
GetExpandedOp(InOp, Lo, Hi);
|
||||
if (TLI.isBigEndian())
|
||||
std::swap(Lo, Hi);
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, LoVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HiVT, Hi);
|
||||
Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
|
||||
return;
|
||||
}
|
||||
break;
|
||||
|
@ -534,8 +534,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
|
|||
// If the input is a vector that needs to be split, convert each split
|
||||
// piece of the input now.
|
||||
GetSplitVector(InOp, Lo, Hi);
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, LoVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HiVT, Hi);
|
||||
Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -549,8 +549,8 @@ void DAGTypeLegalizer::SplitVecRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
|
|||
|
||||
if (TLI.isBigEndian())
|
||||
std::swap(Lo, Hi);
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, dl, LoVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, dl, HiVT, Hi);
|
||||
Lo = DAG.getNode(ISD::BITCAST, dl, LoVT, Lo);
|
||||
Hi = DAG.getNode(ISD::BITCAST, dl, HiVT, Hi);
|
||||
}
|
||||
|
||||
void DAGTypeLegalizer::SplitVecRes_BUILD_VECTOR(SDNode *N, SDValue &Lo,
|
||||
|
@ -978,7 +978,7 @@ bool DAGTypeLegalizer::SplitVectorOperand(SDNode *N, unsigned OpNo) {
|
|||
#endif
|
||||
llvm_unreachable("Do not know how to split this operator's operand!");
|
||||
|
||||
case ISD::BIT_CONVERT: Res = SplitVecOp_BIT_CONVERT(N); break;
|
||||
case ISD::BITCAST: Res = SplitVecOp_BITCAST(N); break;
|
||||
case ISD::EXTRACT_SUBVECTOR: Res = SplitVecOp_EXTRACT_SUBVECTOR(N); break;
|
||||
case ISD::EXTRACT_VECTOR_ELT:Res = SplitVecOp_EXTRACT_VECTOR_ELT(N); break;
|
||||
case ISD::CONCAT_VECTORS: Res = SplitVecOp_CONCAT_VECTORS(N); break;
|
||||
|
@ -1034,8 +1034,8 @@ SDValue DAGTypeLegalizer::SplitVecOp_UnaryOp(SDNode *N) {
|
|||
return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
|
||||
}
|
||||
|
||||
SDValue DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) {
|
||||
// For example, i64 = BIT_CONVERT v4i16 on alpha. Typically the vector will
|
||||
SDValue DAGTypeLegalizer::SplitVecOp_BITCAST(SDNode *N) {
|
||||
// For example, i64 = BITCAST v4i16 on alpha. Typically the vector will
|
||||
// end up being split all the way down to individual components. Convert the
|
||||
// split pieces into integers and reassemble.
|
||||
SDValue Lo, Hi;
|
||||
|
@ -1046,7 +1046,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_BIT_CONVERT(SDNode *N) {
|
|||
if (TLI.isBigEndian())
|
||||
std::swap(Lo, Hi);
|
||||
|
||||
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), N->getValueType(0),
|
||||
return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), N->getValueType(0),
|
||||
JoinIntegers(Lo, Hi));
|
||||
}
|
||||
|
||||
|
@ -1151,7 +1151,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
|
|||
|
||||
SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
|
||||
DebugLoc DL = N->getDebugLoc();
|
||||
|
||||
|
||||
// The input operands all must have the same type, and we know the result the
|
||||
// result type is valid. Convert this to a buildvector which extracts all the
|
||||
// input elements.
|
||||
|
@ -1168,7 +1168,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_CONCAT_VECTORS(SDNode *N) {
|
|||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return DAG.getNode(ISD::BUILD_VECTOR, DL, N->getValueType(0),
|
||||
&Elts[0], Elts.size());
|
||||
}
|
||||
|
@ -1197,7 +1197,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
|
|||
#endif
|
||||
llvm_unreachable("Do not know how to widen the result of this operator!");
|
||||
|
||||
case ISD::BIT_CONVERT: Res = WidenVecRes_BIT_CONVERT(N); break;
|
||||
case ISD::BITCAST: Res = WidenVecRes_BITCAST(N); break;
|
||||
case ISD::BUILD_VECTOR: Res = WidenVecRes_BUILD_VECTOR(N); break;
|
||||
case ISD::CONCAT_VECTORS: Res = WidenVecRes_CONCAT_VECTORS(N); break;
|
||||
case ISD::CONVERT_RNDSAT: Res = WidenVecRes_CONVERT_RNDSAT(N); break;
|
||||
|
@ -1304,11 +1304,11 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
|
|||
SDValue InOp2 = GetWidenedVector(N->getOperand(1));
|
||||
return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2);
|
||||
}
|
||||
|
||||
|
||||
// No legal vector version so unroll the vector operation and then widen.
|
||||
if (NumElts == 1)
|
||||
return DAG.UnrollVectorOp(N, WidenVT.getVectorNumElements());
|
||||
|
||||
|
||||
// Since the operation can trap, apply operation on the original vector.
|
||||
EVT MaxVT = VT;
|
||||
SDValue InOp1 = GetWidenedVector(N->getOperand(0));
|
||||
|
@ -1341,9 +1341,9 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
|
|||
|
||||
if (NumElts == 1) {
|
||||
for (unsigned i = 0; i != CurNumElts; ++i, ++Idx) {
|
||||
SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
|
||||
SDValue EOp1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
|
||||
InOp1, DAG.getIntPtrConstant(Idx));
|
||||
SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
|
||||
SDValue EOp2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, WidenEltVT,
|
||||
InOp2, DAG.getIntPtrConstant(Idx));
|
||||
ConcatOps[ConcatEnd++] = DAG.getNode(Opcode, dl, WidenEltVT,
|
||||
EOp1, EOp2);
|
||||
|
@ -1411,7 +1411,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
|
|||
if (VT == WidenVT)
|
||||
return ConcatOps[0];
|
||||
}
|
||||
|
||||
|
||||
// add undefs of size MaxVT until ConcatOps grows to length of WidenVT
|
||||
unsigned NumOps = WidenVT.getVectorNumElements()/MaxVT.getVectorNumElements();
|
||||
if (NumOps != ConcatEnd ) {
|
||||
|
@ -1532,7 +1532,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_InregOp(SDNode *N) {
|
|||
WidenVT, WidenLHS, DAG.getValueType(ExtVT));
|
||||
}
|
||||
|
||||
SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
|
||||
SDValue DAGTypeLegalizer::WidenVecRes_BITCAST(SDNode *N) {
|
||||
SDValue InOp = N->getOperand(0);
|
||||
EVT InVT = InOp.getValueType();
|
||||
EVT VT = N->getValueType(0);
|
||||
|
@ -1551,7 +1551,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
|
|||
InOp = GetPromotedInteger(InOp);
|
||||
InVT = InOp.getValueType();
|
||||
if (WidenVT.bitsEq(InVT))
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, InOp);
|
||||
return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp);
|
||||
break;
|
||||
case SoftenFloat:
|
||||
case ExpandInteger:
|
||||
|
@ -1566,7 +1566,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
|
|||
InVT = InOp.getValueType();
|
||||
if (WidenVT.bitsEq(InVT))
|
||||
// The input widens to the same size. Convert to the widen value.
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, InOp);
|
||||
return DAG.getNode(ISD::BITCAST, dl, WidenVT, InOp);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1606,7 +1606,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_BIT_CONVERT(SDNode *N) {
|
|||
else
|
||||
NewVec = DAG.getNode(ISD::BUILD_VECTOR, dl,
|
||||
NewInVT, &Ops[0], NewNumElts);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, NewVec);
|
||||
return DAG.getNode(ISD::BITCAST, dl, WidenVT, NewVec);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1982,7 +1982,7 @@ bool DAGTypeLegalizer::WidenVectorOperand(SDNode *N, unsigned ResNo) {
|
|||
#endif
|
||||
llvm_unreachable("Do not know how to widen this operator's operand!");
|
||||
|
||||
case ISD::BIT_CONVERT: Res = WidenVecOp_BIT_CONVERT(N); break;
|
||||
case ISD::BITCAST: Res = WidenVecOp_BITCAST(N); break;
|
||||
case ISD::CONCAT_VECTORS: Res = WidenVecOp_CONCAT_VECTORS(N); break;
|
||||
case ISD::EXTRACT_SUBVECTOR: Res = WidenVecOp_EXTRACT_SUBVECTOR(N); break;
|
||||
case ISD::EXTRACT_VECTOR_ELT: Res = WidenVecOp_EXTRACT_VECTOR_ELT(N); break;
|
||||
|
@ -2041,7 +2041,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_Convert(SDNode *N) {
|
|||
return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, &Ops[0], NumElts);
|
||||
}
|
||||
|
||||
SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
|
||||
SDValue DAGTypeLegalizer::WidenVecOp_BITCAST(SDNode *N) {
|
||||
EVT VT = N->getValueType(0);
|
||||
SDValue InOp = GetWidenedVector(N->getOperand(0));
|
||||
EVT InWidenVT = InOp.getValueType();
|
||||
|
@ -2055,7 +2055,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_BIT_CONVERT(SDNode *N) {
|
|||
unsigned NewNumElts = InWidenSize / Size;
|
||||
EVT NewVT = EVT::getVectorVT(*DAG.getContext(), VT, NewNumElts);
|
||||
if (TLI.isTypeLegal(NewVT)) {
|
||||
SDValue BitOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, InOp);
|
||||
SDValue BitOp = DAG.getNode(ISD::BITCAST, dl, NewVT, InOp);
|
||||
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, BitOp,
|
||||
DAG.getIntPtrConstant(0));
|
||||
}
|
||||
|
@ -2144,7 +2144,7 @@ static EVT FindMemType(SelectionDAG& DAG, const TargetLowering &TLI,
|
|||
if (Width == WidenEltWidth)
|
||||
return RetVT;
|
||||
|
||||
// See if there is larger legal integer than the element type to load/store
|
||||
// See if there is larger legal integer than the element type to load/store
|
||||
unsigned VT;
|
||||
for (VT = (unsigned)MVT::LAST_INTEGER_VALUETYPE;
|
||||
VT >= (unsigned)MVT::FIRST_INTEGER_VALUETYPE; --VT) {
|
||||
|
@ -2199,7 +2199,7 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
|
|||
if (NewLdTy != LdTy) {
|
||||
NumElts = Width / NewLdTy.getSizeInBits();
|
||||
NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewLdTy, NumElts);
|
||||
VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVecVT, VecOp);
|
||||
VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, VecOp);
|
||||
// Readjust position and vector position based on new load type
|
||||
Idx = Idx * LdTy.getSizeInBits() / NewLdTy.getSizeInBits();
|
||||
LdTy = NewLdTy;
|
||||
|
@ -2207,7 +2207,7 @@ static SDValue BuildVectorFromScalar(SelectionDAG& DAG, EVT VecTy,
|
|||
VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, NewVecVT, VecOp, LdOps[i],
|
||||
DAG.getIntPtrConstant(Idx++));
|
||||
}
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VecTy, VecOp);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VecTy, VecOp);
|
||||
}
|
||||
|
||||
SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
|
||||
|
@ -2247,7 +2247,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
|
|||
unsigned NumElts = WidenWidth / NewVTWidth;
|
||||
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
|
||||
SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, WidenVT, VecOp);
|
||||
return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp);
|
||||
}
|
||||
if (NewVT == WidenVT)
|
||||
return LdOp;
|
||||
|
@ -2297,7 +2297,7 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVector<SDValue, 16> &LdChain,
|
|||
if (!LdOps[0].getValueType().isVector())
|
||||
// All the loads are scalar loads.
|
||||
return BuildVectorFromScalar(DAG, WidenVT, LdOps, 0, End);
|
||||
|
||||
|
||||
// If the load contains vectors, build the vector using concat vector.
|
||||
// All of the vectors used to loads are power of 2 and the scalars load
|
||||
// can be combined to make a power of 2 vector.
|
||||
|
@ -2441,7 +2441,7 @@ void DAGTypeLegalizer::GenWidenVectorStores(SmallVector<SDValue, 16>& StChain,
|
|||
// Cast the vector to the scalar type we can store
|
||||
unsigned NumElts = ValWidth / NewVTWidth;
|
||||
EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), NewVT, NumElts);
|
||||
SDValue VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, NewVecVT, ValOp);
|
||||
SDValue VecOp = DAG.getNode(ISD::BITCAST, dl, NewVecVT, ValOp);
|
||||
// Readjust index position based on new vector type
|
||||
Idx = Idx * ValEltWidth / NewVTWidth;
|
||||
do {
|
||||
|
@ -2474,7 +2474,7 @@ DAGTypeLegalizer::GenWidenVectorTruncStores(SmallVector<SDValue, 16>& StChain,
|
|||
bool isNonTemporal = ST->isNonTemporal();
|
||||
SDValue ValOp = GetWidenedVector(ST->getValue());
|
||||
DebugLoc dl = ST->getDebugLoc();
|
||||
|
||||
|
||||
EVT StVT = ST->getMemoryVT();
|
||||
EVT ValVT = ValOp.getValueType();
|
||||
|
||||
|
|
|
@ -111,7 +111,7 @@ bool ConstantFPSDNode::isValueValidForType(EVT VT,
|
|||
/// BUILD_VECTOR where all of the elements are ~0 or undef.
|
||||
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
|
||||
// Look through a bit convert.
|
||||
if (N->getOpcode() == ISD::BIT_CONVERT)
|
||||
if (N->getOpcode() == ISD::BITCAST)
|
||||
N = N->getOperand(0).getNode();
|
||||
|
||||
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
|
||||
|
@ -152,7 +152,7 @@ bool ISD::isBuildVectorAllOnes(const SDNode *N) {
|
|||
/// BUILD_VECTOR where all of the elements are 0 or undef.
|
||||
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
|
||||
// Look through a bit convert.
|
||||
if (N->getOpcode() == ISD::BIT_CONVERT)
|
||||
if (N->getOpcode() == ISD::BITCAST)
|
||||
N = N->getOperand(0).getNode();
|
||||
|
||||
if (N->getOpcode() != ISD::BUILD_VECTOR) return false;
|
||||
|
@ -1356,7 +1356,7 @@ SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
|
|||
void *IP = 0;
|
||||
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
|
||||
return SDValue(E, 0);
|
||||
|
||||
|
||||
SDNode *N = new (NodeAllocator) EHLabelSDNode(dl, Root, Label);
|
||||
CSEMap.InsertNode(N, IP);
|
||||
AllNodes.push_back(N);
|
||||
|
@ -1406,11 +1406,11 @@ SDValue SelectionDAG::getMDNode(const MDNode *MD) {
|
|||
FoldingSetNodeID ID;
|
||||
AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
|
||||
ID.AddPointer(MD);
|
||||
|
||||
|
||||
void *IP = 0;
|
||||
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
|
||||
return SDValue(E, 0);
|
||||
|
||||
|
||||
SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
|
||||
CSEMap.InsertNode(N, IP);
|
||||
AllNodes.push_back(N);
|
||||
|
@ -2365,7 +2365,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
|
|||
APFloat::rmNearestTiesToEven);
|
||||
return getConstantFP(apf, VT);
|
||||
}
|
||||
case ISD::BIT_CONVERT:
|
||||
case ISD::BITCAST:
|
||||
if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
|
||||
return getConstantFP(Val.bitsToFloat(), VT);
|
||||
else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
|
||||
|
@ -2416,7 +2416,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
|
|||
APInt api(VT.getSizeInBits(), 2, x);
|
||||
return getConstant(api, VT);
|
||||
}
|
||||
case ISD::BIT_CONVERT:
|
||||
case ISD::BITCAST:
|
||||
if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
|
||||
return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
|
||||
else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
|
||||
|
@@ -2518,13 +2518,13 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
return Operand.getNode()->getOperand(0);
}
break;
case ISD::BIT_CONVERT:
case ISD::BITCAST:
// Basic sanity checking.
assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
&& "Cannot BIT_CONVERT between types of different sizes!");
&& "Cannot BITCAST between types of different sizes!");
if (VT == Operand.getValueType()) return Operand; // noop conversion.
if (OpOpcode == ISD::BIT_CONVERT) // bitconv(bitconv(x)) -> bitconv(x)
return getNode(ISD::BIT_CONVERT, DL, VT, Operand.getOperand(0));
if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
if (OpOpcode == ISD::UNDEF)
return getUNDEF(VT);
break;
@@ -3060,7 +3060,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
case ISD::VECTOR_SHUFFLE:
|
||||
llvm_unreachable("should use getVectorShuffle constructor!");
|
||||
break;
|
||||
case ISD::BIT_CONVERT:
|
||||
case ISD::BITCAST:
|
||||
// Fold bit_convert nodes from a type to themselves.
|
||||
if (N1.getValueType() == VT)
|
||||
return N1;
|
||||
|
@ -3177,7 +3177,7 @@ static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
|
|||
else if (VT.isVector()) {
|
||||
unsigned NumElts = VT.getVectorNumElements();
|
||||
MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT,
|
||||
DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
|
||||
EltVT, NumElts)));
|
||||
} else
|
||||
|
@ -3274,7 +3274,7 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
|
|||
if (VT.bitsGT(LVT))
|
||||
VT = LVT;
|
||||
}
|
||||
|
||||
|
||||
// If we're optimizing for size, and there is a limit, bump the maximum number
|
||||
// of operations inserted down to 4. This is a wild guess that approximates
|
||||
// the size of a call to memcpy or memset (3 arguments + call).
|
||||
|
@ -3340,7 +3340,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
|
|||
bool CopyFromStr = isMemSrcFromString(Src, Str);
|
||||
bool isZeroStr = CopyFromStr && Str.empty();
|
||||
unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy();
|
||||
|
||||
|
||||
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
|
||||
(DstAlignCanChange ? 0 : Align),
|
||||
(isZeroStr ? 0 : SrcAlign),
|
||||
|
@ -3682,7 +3682,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
|
|||
if (Result.getNode())
|
||||
return Result;
|
||||
|
||||
// Emit a library call.
|
||||
// Emit a library call.
|
||||
const Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext());
|
||||
TargetLowering::ArgListTy Args;
|
||||
TargetLowering::ArgListEntry Entry;
|
||||
|
@ -3912,7 +3912,7 @@ static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
|
|||
!isa<ConstantSDNode>(Ptr.getOperand(1)) ||
|
||||
!isa<FrameIndexSDNode>(Ptr.getOperand(0)))
|
||||
return MachinePointerInfo();
|
||||
|
||||
|
||||
int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
|
||||
return MachinePointerInfo::getFixedStack(FI, Offset+
|
||||
cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
|
||||
|
@ -3930,7 +3930,7 @@ static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
|
|||
return InferPointerInfo(Ptr);
|
||||
return MachinePointerInfo();
|
||||
}
|
||||
|
||||
|
||||
|
||||
SDValue
|
||||
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
|
||||
|
@ -3947,12 +3947,12 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
|
|||
Flags |= MachineMemOperand::MOVolatile;
|
||||
if (isNonTemporal)
|
||||
Flags |= MachineMemOperand::MONonTemporal;
|
||||
|
||||
|
||||
// If we don't have a PtrInfo, infer the trivial frame index case to simplify
|
||||
// clients.
|
||||
if (PtrInfo.V == 0)
|
||||
PtrInfo = InferPointerInfo(Ptr, Offset);
|
||||
|
||||
|
||||
MachineFunction &MF = getMachineFunction();
|
||||
MachineMemOperand *MMO =
|
||||
MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
|
||||
|
@ -3961,7 +3961,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
|
|||
}
|
||||
|
||||
SDValue
|
||||
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
|
||||
SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
|
||||
EVT VT, DebugLoc dl, SDValue Chain,
|
||||
SDValue Ptr, SDValue Offset, EVT MemVT,
|
||||
MachineMemOperand *MMO) {
|
||||
|
@ -4052,7 +4052,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
|
|||
Flags |= MachineMemOperand::MOVolatile;
|
||||
if (isNonTemporal)
|
||||
Flags |= MachineMemOperand::MONonTemporal;
|
||||
|
||||
|
||||
if (PtrInfo.V == 0)
|
||||
PtrInfo = InferPointerInfo(Ptr);
|
||||
|
||||
|
@ -4101,7 +4101,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
|
|||
Flags |= MachineMemOperand::MOVolatile;
|
||||
if (isNonTemporal)
|
||||
Flags |= MachineMemOperand::MONonTemporal;
|
||||
|
||||
|
||||
if (PtrInfo.V == 0)
|
||||
PtrInfo = InferPointerInfo(Ptr);
|
||||
|
||||
|
@ -5431,7 +5431,7 @@ MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
|
|||
}
|
||||
|
||||
MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
|
||||
const SDValue *Ops, unsigned NumOps, EVT memvt,
|
||||
const SDValue *Ops, unsigned NumOps, EVT memvt,
|
||||
MachineMemOperand *mmo)
|
||||
: SDNode(Opc, dl, VTs, Ops, NumOps),
|
||||
MemoryVT(memvt), MMO(mmo) {
|
||||
|
@ -5450,7 +5450,7 @@ void SDNode::Profile(FoldingSetNodeID &ID) const {
|
|||
namespace {
|
||||
struct EVTArray {
|
||||
std::vector<EVT> VTs;
|
||||
|
||||
|
||||
EVTArray() {
|
||||
VTs.reserve(MVT::LAST_VALUETYPE);
|
||||
for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
|
||||
|
@ -5542,8 +5542,8 @@ bool SDNode::isOperandOf(SDNode *N) const {
|
|||
|
||||
/// reachesChainWithoutSideEffects - Return true if this operand (which must
|
||||
/// be a chain) reaches the specified operand without crossing any
|
||||
/// side-effecting instructions on any chain path. In practice, this looks
|
||||
/// through token factors and non-volatile loads. In order to remain efficient,
|
||||
/// side-effecting instructions on any chain path. In practice, this looks
|
||||
/// through token factors and non-volatile loads. In order to remain efficient,
|
||||
/// this only looks a couple of nodes in, it does not do an exhaustive search.
|
||||
bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
|
||||
unsigned Depth) const {
|
||||
|
@@ -5788,7 +5788,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::UINT_TO_FP: return "uint_to_fp";
case ISD::FP_TO_SINT: return "fp_to_sint";
case ISD::FP_TO_UINT: return "fp_to_uint";
case ISD::BIT_CONVERT: return "bit_convert";
case ISD::BITCAST: return "bit_convert";
case ISD::FP16_TO_FP32: return "fp16_to_fp32";
case ISD::FP32_TO_FP16: return "fp32_to_fp16";

@ -6051,7 +6051,7 @@ void SDNode::print_details(raw_ostream &OS, const SelectionDAG *G) const {
|
|||
const char *AM = getIndexedModeName(ST->getAddressingMode());
|
||||
if (*AM)
|
||||
OS << ", " << AM;
|
||||
|
||||
|
||||
OS << ">";
|
||||
} else if (const MemSDNode* M = dyn_cast<MemSDNode>(this)) {
|
||||
OS << "<" << *M->getMemOperand() << ">";
|
||||
|
@ -6102,7 +6102,7 @@ void SDNode::print(raw_ostream &OS, const SelectionDAG *G) const {
|
|||
|
||||
static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
|
||||
const SelectionDAG *G, unsigned depth,
|
||||
unsigned indent)
|
||||
unsigned indent)
|
||||
{
|
||||
if (depth == 0)
|
||||
return;
|
||||
|
@ -6123,7 +6123,7 @@ static void printrWithDepthHelper(raw_ostream &OS, const SDNode *N,
|
|||
void SDNode::printrWithDepth(raw_ostream &OS, const SelectionDAG *G,
|
||||
unsigned depth) const {
|
||||
printrWithDepthHelper(OS, this, G, depth, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void SDNode::printrFull(raw_ostream &OS, const SelectionDAG *G) const {
|
||||
// Don't print impossibly deep things.
|
||||
|
@ -6137,7 +6137,7 @@ void SDNode::dumprWithDepth(const SelectionDAG *G, unsigned depth) const {
|
|||
void SDNode::dumprFull(const SelectionDAG *G) const {
|
||||
// Don't print impossibly deep things.
|
||||
dumprWithDepth(G, 100);
|
||||
}
|
||||
}
|
||||
|
||||
static void DumpNodes(const SDNode *N, unsigned indent, const SelectionDAG *G) {
|
||||
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
|
||||
|
@ -6221,10 +6221,10 @@ SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
|
|||
}
|
||||
|
||||
|
||||
/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
|
||||
/// location that is 'Dist' units away from the location that the 'Base' load
|
||||
/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
|
||||
/// location that is 'Dist' units away from the location that the 'Base' load
|
||||
/// is loading from.
|
||||
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
|
||||
bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
|
||||
unsigned Bytes, int Dist) const {
|
||||
if (LD->getChain() != Base->getChain())
|
||||
return false;
|
||||
|
@ -6477,7 +6477,7 @@ static void checkForCyclesHelper(const SDNode *N,
|
|||
// If this node has already been checked, don't check it again.
|
||||
if (Checked.count(N))
|
||||
return;
|
||||
|
||||
|
||||
// If a node has already been visited on this depth-first walk, reject it as
|
||||
// a cycle.
|
||||
if (!Visited.insert(N)) {
|
||||
|
@ -6486,10 +6486,10 @@ static void checkForCyclesHelper(const SDNode *N,
|
|||
errs() << "Detected cycle in SelectionDAG\n";
|
||||
abort();
|
||||
}
|
||||
|
||||
|
||||
for(unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
|
||||
checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
|
||||
|
||||
|
||||
Checked.insert(N);
|
||||
Visited.erase(N);
|
||||
}
|
||||
@ -131,8 +131,8 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
|
|||
Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2,
|
||||
RoundParts / 2, PartVT, HalfVT);
|
||||
} else {
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, DL, HalfVT, Parts[0]);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, DL, HalfVT, Parts[1]);
|
||||
Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
|
||||
Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
|
||||
}
|
||||
|
||||
if (TLI.isBigEndian())
|
||||
|
@ -164,8 +164,8 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
|
|||
assert(ValueVT == EVT(MVT::ppcf128) && PartVT == EVT(MVT::f64) &&
|
||||
"Unexpected split");
|
||||
SDValue Lo, Hi;
|
||||
Lo = DAG.getNode(ISD::BIT_CONVERT, DL, EVT(MVT::f64), Parts[0]);
|
||||
Hi = DAG.getNode(ISD::BIT_CONVERT, DL, EVT(MVT::f64), Parts[1]);
|
||||
Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
|
||||
Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
|
||||
if (TLI.isBigEndian())
|
||||
std::swap(Lo, Hi);
|
||||
Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
|
||||
|
@ -207,7 +207,7 @@ static SDValue getCopyFromParts(SelectionDAG &DAG, DebugLoc DL,
|
|||
}
|
||||
|
||||
if (PartVT.getSizeInBits() == ValueVT.getSizeInBits())
|
||||
return DAG.getNode(ISD::BIT_CONVERT, DL, ValueVT, Val);
|
||||
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
|
||||
|
||||
llvm_unreachable("Unknown mismatch!");
|
||||
return SDValue();
|
||||
|
@ -284,7 +284,7 @@ static SDValue getCopyFromPartsVector(SelectionDAG &DAG, DebugLoc DL,
|
|||
}
|
||||
|
||||
// Vector/Vector bitcast.
|
||||
return DAG.getNode(ISD::BIT_CONVERT, DL, ValueVT, Val);
|
||||
return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
|
||||
}
|
||||
|
||||
assert(ValueVT.getVectorElementType() == PartVT &&
|
||||
|
@ -342,7 +342,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
|
|||
} else if (PartBits == ValueVT.getSizeInBits()) {
|
||||
// Different types of the same size.
|
||||
assert(NumParts == 1 && PartVT != ValueVT);
|
||||
Val = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Val);
|
||||
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
|
||||
} else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
|
||||
// If the parts cover less bits than value has, truncate the value.
|
||||
assert(PartVT.isInteger() && ValueVT.isInteger() &&
|
||||
|
@ -385,7 +385,7 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
|
|||
|
||||
// The number of parts is a power of 2. Repeatedly bisect the value using
|
||||
// EXTRACT_ELEMENT.
|
||||
Parts[0] = DAG.getNode(ISD::BIT_CONVERT, DL,
|
||||
Parts[0] = DAG.getNode(ISD::BITCAST, DL,
|
||||
EVT::getIntegerVT(*DAG.getContext(),
|
||||
ValueVT.getSizeInBits()),
|
||||
Val);
|
||||
|
@ -403,8 +403,8 @@ static void getCopyToParts(SelectionDAG &DAG, DebugLoc DL,
|
|||
ThisVT, Part0, DAG.getIntPtrConstant(0));
|
||||
|
||||
if (ThisBits == PartBits && ThisVT != PartVT) {
|
||||
Part0 = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Part0);
|
||||
Part1 = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Part1);
|
||||
Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
|
||||
Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -428,7 +428,7 @@ static void getCopyToPartsVector(SelectionDAG &DAG, DebugLoc DL,
|
|||
// Nothing to do.
|
||||
} else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
|
||||
// Bitconvert vector->vector case.
|
||||
Val = DAG.getNode(ISD::BIT_CONVERT, DL, PartVT, Val);
|
||||
Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
|
||||
} else if (PartVT.isVector() &&
|
||||
PartVT.getVectorElementType() == ValueVT.getVectorElementType()&&
|
||||
PartVT.getVectorNumElements() > ValueVT.getVectorNumElements()) {
|
||||
|
@@ -2579,9 +2579,9 @@ void SelectionDAGBuilder::visitBitCast(const User &I) {
  EVT DestVT = TLI.getValueType(I.getType());

  // BitCast assures us that source and destination are the same size so this is
  // either a BIT_CONVERT or a no-op.
  // either a BITCAST or a no-op.
  if (DestVT != N.getValueType())
    setValue(&I, DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
    setValue(&I, DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
                             DestVT, N)); // convert types.
  else
    setValue(&I, N); // noop cast.
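Editor's note: as the comment in visitBitCast says, an IR bitcast never changes bits; it only needs an ISD::BITCAST node when the source and destination EVTs differ. A small self-contained sketch of the semantics the node models (store with one type, reload with the other); the helper name is illustrative only:

  #include <cstdint>
  #include <cstring>

  // Same-size reinterpretation: exactly what ISD::BITCAST models.
  template <typename To, typename From>
  To bit_cast_sketch(const From &Src) {
    static_assert(sizeof(To) == sizeof(From), "BITCAST requires equal bit widths");
    To Dst;
    std::memcpy(&Dst, &Src, sizeof(To));  // no conversion, only reinterpretation
    return Dst;
  }

  // Usage: bit_cast_sketch<uint32_t>(1.0f) yields 0x3f800000, the f32 <-> i32 case above.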
@@ -3021,7 +3021,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
      // Do not serialize non-volatile loads against each other.
      Root = DAG.getRoot();
  }

  SmallVector<SDValue, 4> Values(NumValues);
  SmallVector<SDValue, 4> Chains(std::min(unsigned(MaxParallelChains),
                                          NumValues));

@@ -3198,7 +3198,7 @@ void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
  if (!I.getType()->isVoidTy()) {
    if (const VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
      EVT VT = TLI.getValueType(PTy);
      Result = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(), VT, Result);
      Result = DAG.getNode(ISD::BITCAST, getCurDebugLoc(), VT, Result);
    }

    setValue(&I, Result);
@@ -3217,7 +3217,7 @@ GetSignificand(SelectionDAG &DAG, SDValue Op, DebugLoc dl) {
                           DAG.getConstant(0x007fffff, MVT::i32));
  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
                           DAG.getConstant(0x3f800000, MVT::i32));
  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t2);
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
}

/// GetExponent - Get the exponent:
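Editor's note: GetSignificand is a pure bit trick that relies on BITCAST on both ends: keep the 23 mantissa bits, force the exponent field to that of 1.0f, and reinterpret as a float, which yields the significand in [1.0, 2.0). A host-side sketch of the same computation, valid for normalized inputs (the function name is illustrative):

  #include <cstdint>
  #include <cstring>

  float getSignificandSketch(float X) {
    uint32_t Bits;
    std::memcpy(&Bits, &X, sizeof(Bits));       // BITCAST f32 -> i32
    Bits = (Bits & 0x007fffffu) | 0x3f800000u;  // t1 = AND mantissa, t2 = OR exponent of 1.0f
    float Result;
    std::memcpy(&Result, &Bits, sizeof(Result)); // BITCAST i32 -> f32
    return Result;                               // in [1.0, 2.0) for normal X
  }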
@@ -3316,13 +3316,13 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f7f5e7e));
      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t5);
      SDValue TwoToFracPartOfX = DAG.getNode(ISD::BITCAST, dl,MVT::i32, t5);

      // Add the exponent into the result in integer domain.
      SDValue t6 = DAG.getNode(ISD::ADD, dl, MVT::i32,
                               TwoToFracPartOfX, IntegerPartOfX);

      result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t6);
      result = DAG.getNode(ISD::BITCAST, dl, MVT::f32, t6);
    } else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
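Editor's note: the limited-precision expf expansion above writes exp(x) = 2^i * 2^f with i an integer and f in [0,1), approximates 2^f with a short polynomial, and then adds i straight into the exponent field of the result's bit pattern, hence the BITCAST to i32, the integer ADD, and the BITCAST back to f32. A rough sketch of that scheme under the assumption that the integer part is pre-shifted into the exponent position, as the DAG code does; the polynomial coefficients here are illustrative, not the ones the compiler emits:

  #include <cmath>
  #include <cstdint>
  #include <cstring>

  float expApproxSketch(float X) {
    float T = X * 1.4426950f;                 // x * log2(e)
    int I = (int)std::floor(T);               // integer part
    float F = T - (float)I;                   // fractional part, in [0, 1)
    // Small polynomial approximating 2^F (illustrative coefficients).
    float TwoToF = 1.0f + F * (0.6931472f + F * (0.2401397f + F * 0.0558263f));
    uint32_t Bits;
    std::memcpy(&Bits, &TwoToF, sizeof(Bits)); // BITCAST f32 -> i32
    Bits += (uint32_t)I << 23;                 // add I into the exponent field
    float Result;
    std::memcpy(&Result, &Bits, sizeof(Result)); // BITCAST i32 -> f32
    return Result;                              // valid while the result stays normal
  }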
@ -3342,13 +3342,13 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
|
|||
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
|
||||
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
|
||||
getF32Constant(DAG, 0x3f7ff8fd));
|
||||
SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,MVT::i32, t7);
|
||||
SDValue TwoToFracPartOfX = DAG.getNode(ISD::BITCAST, dl,MVT::i32, t7);
|
||||
|
||||
// Add the exponent into the result in integer domain.
|
||||
SDValue t8 = DAG.getNode(ISD::ADD, dl, MVT::i32,
|
||||
TwoToFracPartOfX, IntegerPartOfX);
|
||||
|
||||
result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t8);
|
||||
result = DAG.getNode(ISD::BITCAST, dl, MVT::f32, t8);
|
||||
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
|
||||
// For floating-point precision of 18:
|
||||
//
|
||||
|
@ -3380,14 +3380,14 @@ SelectionDAGBuilder::visitExp(const CallInst &I) {
|
|||
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
|
||||
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
|
||||
getF32Constant(DAG, 0x3f800000));
|
||||
SDValue TwoToFracPartOfX = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
SDValue TwoToFracPartOfX = DAG.getNode(ISD::BITCAST, dl,
|
||||
MVT::i32, t13);
|
||||
|
||||
// Add the exponent into the result in integer domain.
|
||||
SDValue t14 = DAG.getNode(ISD::ADD, dl, MVT::i32,
|
||||
TwoToFracPartOfX, IntegerPartOfX);
|
||||
|
||||
result = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, t14);
|
||||
result = DAG.getNode(ISD::BITCAST, dl, MVT::f32, t14);
|
||||
}
|
||||
} else {
|
||||
// No special expansion.
|
||||
|
@ -3409,7 +3409,7 @@ SelectionDAGBuilder::visitLog(const CallInst &I) {
|
|||
if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
|
||||
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
|
||||
SDValue Op = getValue(I.getArgOperand(0));
|
||||
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
|
||||
SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
|
||||
|
||||
// Scale the exponent by log(2) [0.69314718f].
|
||||
SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
|
||||
|
@ -3519,7 +3519,7 @@ SelectionDAGBuilder::visitLog2(const CallInst &I) {
|
|||
if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
|
||||
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
|
||||
SDValue Op = getValue(I.getArgOperand(0));
|
||||
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
|
||||
SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
|
||||
|
||||
// Get the exponent.
|
||||
SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
|
||||
|
@ -3628,7 +3628,7 @@ SelectionDAGBuilder::visitLog10(const CallInst &I) {
|
|||
if (getValue(I.getArgOperand(0)).getValueType() == MVT::f32 &&
|
||||
LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
|
||||
SDValue Op = getValue(I.getArgOperand(0));
|
||||
SDValue Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
|
||||
SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
|
||||
|
||||
// Scale the exponent by log10(2) [0.30102999f].
|
||||
SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
|
||||
|
@ -3756,11 +3756,11 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
|
|||
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
|
||||
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
|
||||
getF32Constant(DAG, 0x3f7f5e7e));
|
||||
SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
|
||||
SDValue t6 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t5);
|
||||
SDValue TwoToFractionalPartOfX =
|
||||
DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
|
||||
|
||||
result = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
result = DAG.getNode(ISD::BITCAST, dl,
|
||||
MVT::f32, TwoToFractionalPartOfX);
|
||||
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
|
||||
// For floating-point precision of 12:
|
||||
|
@ -3781,11 +3781,11 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
|
|||
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
|
||||
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
|
||||
getF32Constant(DAG, 0x3f7ff8fd));
|
||||
SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
|
||||
SDValue t8 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t7);
|
||||
SDValue TwoToFractionalPartOfX =
|
||||
DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
|
||||
|
||||
result = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
result = DAG.getNode(ISD::BITCAST, dl,
|
||||
MVT::f32, TwoToFractionalPartOfX);
|
||||
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
|
||||
// For floating-point precision of 18:
|
||||
|
@ -3817,11 +3817,11 @@ SelectionDAGBuilder::visitExp2(const CallInst &I) {
|
|||
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
|
||||
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
|
||||
getF32Constant(DAG, 0x3f800000));
|
||||
SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
|
||||
SDValue t14 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t13);
|
||||
SDValue TwoToFractionalPartOfX =
|
||||
DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
|
||||
|
||||
result = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
result = DAG.getNode(ISD::BITCAST, dl,
|
||||
MVT::f32, TwoToFractionalPartOfX);
|
||||
}
|
||||
} else {
|
||||
|
@ -3889,11 +3889,11 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
|
|||
SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
|
||||
SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
|
||||
getF32Constant(DAG, 0x3f7f5e7e));
|
||||
SDValue t6 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t5);
|
||||
SDValue t6 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t5);
|
||||
SDValue TwoToFractionalPartOfX =
|
||||
DAG.getNode(ISD::ADD, dl, MVT::i32, t6, IntegerPartOfX);
|
||||
|
||||
result = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
result = DAG.getNode(ISD::BITCAST, dl,
|
||||
MVT::f32, TwoToFractionalPartOfX);
|
||||
} else if (LimitFloatPrecision > 6 && LimitFloatPrecision <= 12) {
|
||||
// For floating-point precision of 12:
|
||||
|
@ -3914,11 +3914,11 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
|
|||
SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
|
||||
SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
|
||||
getF32Constant(DAG, 0x3f7ff8fd));
|
||||
SDValue t8 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t7);
|
||||
SDValue t8 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t7);
|
||||
SDValue TwoToFractionalPartOfX =
|
||||
DAG.getNode(ISD::ADD, dl, MVT::i32, t8, IntegerPartOfX);
|
||||
|
||||
result = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
result = DAG.getNode(ISD::BITCAST, dl,
|
||||
MVT::f32, TwoToFractionalPartOfX);
|
||||
} else { // LimitFloatPrecision > 12 && LimitFloatPrecision <= 18
|
||||
// For floating-point precision of 18:
|
||||
|
@ -3950,11 +3950,11 @@ SelectionDAGBuilder::visitPow(const CallInst &I) {
|
|||
SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
|
||||
SDValue t13 = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
|
||||
getF32Constant(DAG, 0x3f800000));
|
||||
SDValue t14 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, t13);
|
||||
SDValue t14 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, t13);
|
||||
SDValue TwoToFractionalPartOfX =
|
||||
DAG.getNode(ISD::ADD, dl, MVT::i32, t14, IntegerPartOfX);
|
||||
|
||||
result = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
result = DAG.getNode(ISD::BITCAST, dl,
|
||||
MVT::f32, TwoToFractionalPartOfX);
|
||||
}
|
||||
} else {
|
||||
|
@ -4072,11 +4072,11 @@ SelectionDAGBuilder::EmitFuncArgumentDbgValue(const Value *V, MDNode *Variable,
|
|||
if (VMI != FuncInfo.ValueMap.end())
|
||||
Reg = VMI->second;
|
||||
}
|
||||
|
||||
|
||||
if (!Reg && N.getNode()) {
|
||||
// Check if frame index is available.
|
||||
if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(N.getNode()))
|
||||
if (FrameIndexSDNode *FINode =
|
||||
if (FrameIndexSDNode *FINode =
|
||||
dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode())) {
|
||||
Reg = TRI->getFrameRegister(MF);
|
||||
Offset = FINode->getIndex();
|
||||
|
@ -4476,7 +4476,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
|
|||
ShOps[1] = DAG.getConstant(0, MVT::i32);
|
||||
ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, ShAmtVT, &ShOps[0], 2);
|
||||
EVT DestVT = TLI.getValueType(I.getType());
|
||||
ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, DestVT, ShAmt);
|
||||
ShAmt = DAG.getNode(ISD::BITCAST, dl, DestVT, ShAmt);
|
||||
Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
|
||||
DAG.getConstant(NewIntrinsic, MVT::i32),
|
||||
getValue(I.getArgOperand(0)), ShAmt);
|
||||
|
@ -4713,7 +4713,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
|
|||
Ops[3] = getValue(I.getArgOperand(2));
|
||||
DAG.setRoot(DAG.getMemIntrinsicNode(ISD::PREFETCH, dl,
|
||||
DAG.getVTList(MVT::Other),
|
||||
&Ops[0], 4,
|
||||
&Ops[0], 4,
|
||||
EVT::getIntegerVT(*Context, 8),
|
||||
MachinePointerInfo(I.getArgOperand(0)),
|
||||
0, /* align */
|
||||
|
@ -5119,7 +5119,7 @@ void SelectionDAGBuilder::visitCall(const CallInst &I) {
|
|||
!MMI.callsExternalVAFunctionWithFloatingPointArguments()) {
|
||||
for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
|
||||
const Type* T = I.getArgOperand(i)->getType();
|
||||
for (po_iterator<const Type*> i = po_begin(T), e = po_end(T);
|
||||
for (po_iterator<const Type*> i = po_begin(T), e = po_end(T);
|
||||
i != e; ++i) {
|
||||
if (!i->isFloatingPointTy()) continue;
|
||||
MMI.setCallsExternalVAFunctionWithFloatingPointArguments(true);
|
||||
|
@ -5419,7 +5419,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
|
|||
// vector types).
|
||||
EVT RegVT = *PhysReg.second->vt_begin();
|
||||
if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
|
||||
OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
|
||||
OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
|
||||
RegVT, OpInfo.CallOperand);
|
||||
OpInfo.ConstraintVT = RegVT;
|
||||
} else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
|
||||
|
@ -5429,7 +5429,7 @@ GetRegistersForValue(SDISelAsmOperandInfo &OpInfo,
|
|||
// machine.
|
||||
RegVT = EVT::getIntegerVT(Context,
|
||||
OpInfo.ConstraintVT.getSizeInBits());
|
||||
OpInfo.CallOperand = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
|
||||
OpInfo.CallOperand = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
|
||||
RegVT, OpInfo.CallOperand);
|
||||
OpInfo.ConstraintVT = RegVT;
|
||||
}
|
||||
|
@ -5945,7 +5945,7 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
|
|||
// not have the same VT as was expected. Convert it to the right type
|
||||
// with bit_convert.
|
||||
if (ResultType != Val.getValueType() && Val.getValueType().isVector()) {
|
||||
Val = DAG.getNode(ISD::BIT_CONVERT, getCurDebugLoc(),
|
||||
Val = DAG.getNode(ISD::BITCAST, getCurDebugLoc(),
|
||||
ResultType, Val);
|
||||
|
||||
} else if (ResultType != Val.getValueType() &&
|
||||
|
|
File diff suppressed because it is too large
|
@ -1519,7 +1519,7 @@ bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
|
|||
break;
|
||||
}
|
||||
case CCValAssign::BCvt: {
|
||||
unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BIT_CONVERT, Arg,
|
||||
unsigned BC = FastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg,
|
||||
/*TODO: Kill=*/false);
|
||||
assert(BC != 0 && "Failed to emit a bitcast!");
|
||||
Arg = BC;
|
||||
|
|
|
@ -238,7 +238,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
|
|||
setLibcallName(RTLIB::SRA_I128, 0);
|
||||
|
||||
if (Subtarget->isAAPCS_ABI()) {
|
||||
// Double-precision floating-point arithmetic helper functions
|
||||
// Double-precision floating-point arithmetic helper functions
|
||||
// RTABI chapter 4.1.2, Table 2
|
||||
setLibcallName(RTLIB::ADD_F64, "__aeabi_dadd");
|
||||
setLibcallName(RTLIB::DIV_F64, "__aeabi_ddiv");
|
||||
|
@ -338,7 +338,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
|
|||
setLibcallName(RTLIB::FPROUND_F64_F32, "__aeabi_d2f");
|
||||
setLibcallName(RTLIB::FPEXT_F32_F64, "__aeabi_f2d");
|
||||
setLibcallCallingConv(RTLIB::FPROUND_F64_F32, CallingConv::ARM_AAPCS);
|
||||
setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);
|
||||
setLibcallCallingConv(RTLIB::FPEXT_F32_F64, CallingConv::ARM_AAPCS);
|
||||
|
||||
// Integer to floating-point conversions.
|
||||
// RTABI chapter 4.1.2, Table 8
|
||||
|
@ -387,7 +387,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
|
|||
setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
|
||||
setLibcallCallingConv(RTLIB::UDIV_I8, CallingConv::ARM_AAPCS);
|
||||
setLibcallCallingConv(RTLIB::UDIV_I16, CallingConv::ARM_AAPCS);
|
||||
setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
|
||||
setLibcallCallingConv(RTLIB::UDIV_I32, CallingConv::ARM_AAPCS);
|
||||
}
|
||||
|
||||
if (Subtarget->isThumb1Only())
|
||||
|
@@ -609,7 +609,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
  if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }
@ -1061,7 +1061,7 @@ ARMTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
|
|||
default: llvm_unreachable("Unknown loc info!");
|
||||
case CCValAssign::Full: break;
|
||||
case CCValAssign::BCvt:
|
||||
Val = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), Val);
|
||||
Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1209,7 +1209,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
|
||||
break;
|
||||
case CCValAssign::BCvt:
|
||||
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
|
||||
Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1666,7 +1666,7 @@ ARMTargetLowering::LowerReturn(SDValue Chain,
|
|||
default: llvm_unreachable("Unknown loc info!");
|
||||
case CCValAssign::Full: break;
|
||||
case CCValAssign::BCvt:
|
||||
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getLocVT(), Arg);
|
||||
Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -2223,7 +2223,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
|
|||
default: llvm_unreachable("Unknown loc info!");
|
||||
case CCValAssign::Full: break;
|
||||
case CCValAssign::BCvt:
|
||||
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
|
||||
ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
|
||||
break;
|
||||
case CCValAssign::SExt:
|
||||
ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
|
||||
|
@ -2689,7 +2689,7 @@ static SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
|
|||
break;
|
||||
}
|
||||
Op = DAG.getNode(Opc, dl, MVT::f32, Op.getOperand(0));
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
|
||||
return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
|
||||
}
|
||||
|
||||
static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
|
||||
|
@ -2708,7 +2708,7 @@ static SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
|
|||
break;
|
||||
}
|
||||
|
||||
Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
|
||||
Op = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
|
||||
return DAG.getNode(Opc, dl, VT, Op);
|
||||
}
|
||||
|
||||
|
@@ -2765,12 +2765,12 @@ SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  return FrameAddr;
}

/// ExpandBIT_CONVERT - If the target supports VFP, this function is called to
/// ExpandBITCAST - If the target supports VFP, this function is called to
/// expand a bit convert where either the source or destination type is i64 to
/// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
/// operand type is illegal (e.g., v2f32 for a target that doesn't support
/// vectors), since the legalizer won't know what to do with that.
static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  DebugLoc dl = N->getDebugLoc();
  SDValue Op = N->getOperand(0);

@@ -2780,7 +2780,7 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
  EVT SrcVT = Op.getValueType();
  EVT DstVT = N->getValueType(0);
  assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
         "ExpandBIT_CONVERT called for non-i64 type");
         "ExpandBITCAST called for non-i64 type");

  // Turn i64->f64 into VMOVDRR.
  if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {

@@ -2788,7 +2788,7 @@ static SDValue ExpandBIT_CONVERT(SDNode *N, SelectionDAG &DAG) {
                             DAG.getConstant(0, MVT::i32));
    SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
                             DAG.getConstant(1, MVT::i32));
    return DAG.getNode(ISD::BIT_CONVERT, dl, DstVT,
    return DAG.getNode(ISD::BITCAST, dl, DstVT,
                       DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
  }
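Editor's note: ExpandBITCAST maps an i64 <-> f64 bitcast onto register moves: the i64 is split into two 32-bit halves and VMOVDRR joins them into one double-precision register (VMOVRRD is the reverse direction). A sketch of the data movement only, not of the actual instruction selection; the function names below are illustrative:

  #include <cstdint>
  #include <cstring>
  #include <utility>

  // i64 -> f64: split into halves, then reinterpret the joined 64 bits as a double
  // (the VMOVDRR direction, ending in the BITCAST to DstVT shown above).
  double i64ToF64Sketch(uint64_t V) {
    uint32_t Lo = (uint32_t)V;               // EXTRACT_ELEMENT 0
    uint32_t Hi = (uint32_t)(V >> 32);       // EXTRACT_ELEMENT 1
    uint64_t Joined = ((uint64_t)Hi << 32) | Lo;
    double D;
    std::memcpy(&D, &Joined, sizeof(D));
    return D;
  }

  // f64 -> two i32 halves (the VMOVRRD direction).
  std::pair<uint32_t, uint32_t> f64ToI32PairSketch(double D) {
    uint64_t Bits;
    std::memcpy(&Bits, &D, sizeof(Bits));
    return {(uint32_t)Bits, (uint32_t)(Bits >> 32)};
  }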
@ -2815,7 +2815,7 @@ static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
|
|||
SDValue EncodedVal = DAG.getTargetConstant(0, MVT::i32);
|
||||
EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
|
||||
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
|
||||
}
|
||||
|
||||
/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
|
||||
|
@ -3068,13 +3068,13 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
|
|||
AndOp = Op1;
|
||||
|
||||
// Ignore bitconvert.
|
||||
if (AndOp.getNode() && AndOp.getOpcode() == ISD::BIT_CONVERT)
|
||||
if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
|
||||
AndOp = AndOp.getOperand(0);
|
||||
|
||||
if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
|
||||
Opc = ARMISD::VTST;
|
||||
Op0 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(0));
|
||||
Op1 = DAG.getNode(ISD::BIT_CONVERT, dl, VT, AndOp.getOperand(1));
|
||||
Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
|
||||
Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
|
||||
Invert = !Invert;
|
||||
}
|
||||
}
|
||||
|
@ -3095,7 +3095,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
|
|||
Opc = ARMISD::VCLTZ;
|
||||
SingleOp = Op1;
|
||||
}
|
||||
|
||||
|
||||
SDValue Result;
|
||||
if (SingleOp.getNode()) {
|
||||
switch (Opc) {
|
||||
|
@ -3499,7 +3499,7 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
|
|||
VMOVModImm);
|
||||
if (Val.getNode()) {
|
||||
SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
|
||||
}
|
||||
|
||||
// Try an immediate VMVN.
|
||||
|
@ -3507,11 +3507,11 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
|
|||
((1LL << SplatBitSize) - 1));
|
||||
Val = isNEONModifiedImm(NegatedImm,
|
||||
SplatUndef.getZExtValue(), SplatBitSize,
|
||||
DAG, VmovVT, VT.is128BitVector(),
|
||||
DAG, VmovVT, VT.is128BitVector(),
|
||||
VMVNModImm);
|
||||
if (Val.getNode()) {
|
||||
SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vmov);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -3553,13 +3553,13 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
|
|||
if (VT.getVectorElementType().isFloatingPoint()) {
|
||||
SmallVector<SDValue, 8> Ops;
|
||||
for (unsigned i = 0; i < NumElts; ++i)
|
||||
Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32,
|
||||
Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
|
||||
Op.getOperand(i)));
|
||||
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
|
||||
SDValue Val = DAG.getNode(ISD::BUILD_VECTOR, dl, VecVT, &Ops[0], NumElts);
|
||||
Val = LowerBUILD_VECTOR(Val, DAG, ST);
|
||||
if (Val.getNode())
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
|
||||
}
|
||||
SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
|
||||
if (Val.getNode())
|
||||
|
@ -3582,9 +3582,9 @@ static SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
|
|||
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
|
||||
SmallVector<SDValue, 8> Ops;
|
||||
for (unsigned i = 0; i < NumElts; ++i)
|
||||
Ops.push_back(DAG.getNode(ISD::BIT_CONVERT, dl, EltVT, Op.getOperand(i)));
|
||||
Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
|
||||
SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
|
||||
}
|
||||
|
||||
return SDValue();
|
||||
|
@ -3805,8 +3805,8 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
|
|||
// registers are defined to use, and since i64 is not legal.
|
||||
EVT EltVT = EVT::getFloatingPointVT(EltSize);
|
||||
EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
|
||||
V1 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V1);
|
||||
V2 = DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, V2);
|
||||
V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
|
||||
V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
|
||||
SmallVector<SDValue, 8> Ops;
|
||||
for (unsigned i = 0; i < NumElts; ++i) {
|
||||
if (ShuffleMask[i] < 0)
|
||||
|
@ -3818,7 +3818,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
|
|||
MVT::i32)));
|
||||
}
|
||||
SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, &Ops[0],NumElts);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Val);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Val);
|
||||
}
|
||||
|
||||
return SDValue();
|
||||
|
@ -3851,13 +3851,13 @@ static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
|
|||
SDValue Op1 = Op.getOperand(1);
|
||||
if (Op0.getOpcode() != ISD::UNDEF)
|
||||
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
|
||||
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op0),
|
||||
DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
|
||||
DAG.getIntPtrConstant(0));
|
||||
if (Op1.getOpcode() != ISD::UNDEF)
|
||||
Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
|
||||
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op1),
|
||||
DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
|
||||
DAG.getIntPtrConstant(1));
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Val);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
|
||||
}
|
||||
|
||||
/// SkipExtension - For a node that is either a SIGN_EXTEND, ZERO_EXTEND, or
|
||||
|
@ -3933,7 +3933,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
|
|||
case ISD::EH_SJLJ_DISPATCHSETUP: return LowerEH_SJLJ_DISPATCHSETUP(Op, DAG);
|
||||
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
|
||||
Subtarget);
|
||||
case ISD::BIT_CONVERT: return ExpandBIT_CONVERT(Op.getNode(), DAG);
|
||||
case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG);
|
||||
case ISD::SHL:
|
||||
case ISD::SRL:
|
||||
case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
|
||||
|
@ -3962,8 +3962,8 @@ void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
|
|||
default:
|
||||
llvm_unreachable("Don't know how to custom expand this!");
|
||||
break;
|
||||
case ISD::BIT_CONVERT:
|
||||
Res = ExpandBIT_CONVERT(N, DAG);
|
||||
case ISD::BITCAST:
|
||||
Res = ExpandBITCAST(N, DAG);
|
||||
break;
|
||||
case ISD::SRL:
|
||||
case ISD::SRA:
|
||||
|
@ -4497,7 +4497,7 @@ static SDValue PerformANDCombine(SDNode *N,
|
|||
DebugLoc dl = N->getDebugLoc();
|
||||
EVT VT = N->getValueType(0);
|
||||
SelectionDAG &DAG = DCI.DAG;
|
||||
|
||||
|
||||
APInt SplatBits, SplatUndef;
|
||||
unsigned SplatBitSize;
|
||||
bool HasAnyUndefs;
|
||||
|
@ -4507,17 +4507,17 @@ static SDValue PerformANDCombine(SDNode *N,
|
|||
EVT VbicVT;
|
||||
SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
|
||||
SplatUndef.getZExtValue(), SplatBitSize,
|
||||
DAG, VbicVT, VT.is128BitVector(),
|
||||
DAG, VbicVT, VT.is128BitVector(),
|
||||
OtherModImm);
|
||||
if (Val.getNode()) {
|
||||
SDValue Input =
|
||||
DAG.getNode(ISD::BIT_CONVERT, dl, VbicVT, N->getOperand(0));
|
||||
DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
|
||||
SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vbic);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return SDValue();
|
||||
}
|
||||
|
||||
|
@ -4530,7 +4530,7 @@ static SDValue PerformORCombine(SDNode *N,
|
|||
DebugLoc dl = N->getDebugLoc();
|
||||
EVT VT = N->getValueType(0);
|
||||
SelectionDAG &DAG = DCI.DAG;
|
||||
|
||||
|
||||
APInt SplatBits, SplatUndef;
|
||||
unsigned SplatBitSize;
|
||||
bool HasAnyUndefs;
|
||||
|
@ -4544,9 +4544,9 @@ static SDValue PerformORCombine(SDNode *N,
|
|||
OtherModImm);
|
||||
if (Val.getNode()) {
|
||||
SDValue Input =
|
||||
DAG.getNode(ISD::BIT_CONVERT, dl, VorrVT, N->getOperand(0));
|
||||
DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
|
||||
SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vorr);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4640,7 +4640,7 @@ static SDValue PerformORCombine(SDNode *N,
|
|||
DCI.CombineTo(N, Res, false);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
return SDValue();
|
||||
}
|
||||
|
||||
|
@ -4661,14 +4661,14 @@ static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
|
|||
// N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
|
||||
SDValue Op0 = N->getOperand(0);
|
||||
SDValue Op1 = N->getOperand(1);
|
||||
if (Op0.getOpcode() == ISD::BIT_CONVERT)
|
||||
if (Op0.getOpcode() == ISD::BITCAST)
|
||||
Op0 = Op0.getOperand(0);
|
||||
if (Op1.getOpcode() == ISD::BIT_CONVERT)
|
||||
if (Op1.getOpcode() == ISD::BITCAST)
|
||||
Op1 = Op1.getOperand(0);
|
||||
if (Op0.getOpcode() == ARMISD::VMOVRRD &&
|
||||
Op0.getNode() == Op1.getNode() &&
|
||||
Op0.getResNo() == 0 && Op1.getResNo() == 1)
|
||||
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(),
|
||||
return DAG.getNode(ISD::BITCAST, N->getDebugLoc(),
|
||||
N->getValueType(0), Op0.getOperand(0));
|
||||
return SDValue();
|
||||
}
|
||||
|
@ -4748,7 +4748,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
|
|||
EVT VT = N->getValueType(0);
|
||||
|
||||
// Ignore bit_converts.
|
||||
while (Op.getOpcode() == ISD::BIT_CONVERT)
|
||||
while (Op.getOpcode() == ISD::BITCAST)
|
||||
Op = Op.getOperand(0);
|
||||
if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
|
||||
return SDValue();
|
||||
|
@ -4763,7 +4763,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
|
|||
if (EltSize > VT.getVectorElementType().getSizeInBits())
|
||||
return SDValue();
|
||||
|
||||
return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op);
|
||||
return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
|
||||
}
|
||||
|
||||
/// getVShiftImm - Check if this is a valid build_vector for the immediate
|
||||
|
@ -4771,7 +4771,7 @@ static SDValue PerformVDUPLANECombine(SDNode *N, SelectionDAG &DAG) {
|
|||
/// build_vector must have the same constant integer value.
|
||||
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
|
||||
// Ignore bit_converts.
|
||||
while (Op.getOpcode() == ISD::BIT_CONVERT)
|
||||
while (Op.getOpcode() == ISD::BITCAST)
|
||||
Op = Op.getOperand(0);
|
||||
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
|
||||
APInt SplatBits, SplatUndef;
|
||||
|
@ -5935,7 +5935,7 @@ bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
|
|||
return false;
|
||||
}
|
||||
|
||||
/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
|
||||
/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
|
||||
/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
|
||||
/// specified in the intrinsic calls.
|
||||
bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
|
||||
|
|
|
@ -125,7 +125,7 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
|
|||
|
||||
setOperationAction(ISD::SETCC, MVT::f32, Promote);
|
||||
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Promote);
|
||||
setOperationAction(ISD::BITCAST, MVT::f32, Promote);
|
||||
|
||||
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
|
||||
|
||||
|
@ -616,7 +616,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
|
|||
"Unhandled SINT_TO_FP type in custom expander!");
|
||||
SDValue LD;
|
||||
bool isDouble = Op.getValueType() == MVT::f64;
|
||||
LD = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op.getOperand(0));
|
||||
LD = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
|
||||
SDValue FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, dl,
|
||||
isDouble?MVT::f64:MVT::f32, LD);
|
||||
return FP;
|
||||
|
@ -630,7 +630,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
|
|||
|
||||
src = DAG.getNode(AlphaISD::CVTTQ_, dl, MVT::f64, src);
|
||||
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, src);
|
||||
return DAG.getNode(ISD::BITCAST, dl, MVT::i64, src);
|
||||
}
|
||||
case ISD::ConstantPool: {
|
||||
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
|
||||
|
@ -648,11 +648,11 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
|
|||
case ISD::GlobalAddress: {
|
||||
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
|
||||
const GlobalValue *GV = GSDN->getGlobal();
|
||||
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64,
|
||||
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i64,
|
||||
GSDN->getOffset());
|
||||
// FIXME there isn't really any debug info here
|
||||
|
||||
// if (!GV->hasWeakLinkage() && !GV->isDeclaration()
|
||||
// if (!GV->hasWeakLinkage() && !GV->isDeclaration()
|
||||
// && !GV->hasLinkOnceLinkage()) {
|
||||
if (GV->hasLocalLinkage()) {
|
||||
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, GA,
|
||||
|
@ -727,7 +727,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op,
|
|||
SDValue Val = DAG.getLoad(getPointerTy(), dl, Chain, SrcP,
|
||||
MachinePointerInfo(SrcS),
|
||||
false, false, 0);
|
||||
SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP,
|
||||
SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP,
|
||||
MachinePointerInfo(DestS),
|
||||
false, false, 0);
|
||||
SDValue NP = DAG.getNode(ISD::ADD, dl, MVT::i64, SrcP,
|
||||
|
@ -779,7 +779,7 @@ void AlphaTargetLowering::ReplaceNodeResults(SDNode *N,
|
|||
|
||||
SDValue Chain, DataPtr;
|
||||
LowerVAARG(N, Chain, DataPtr, DAG);
|
||||
SDValue Res = DAG.getLoad(N->getValueType(0), dl, Chain, DataPtr,
|
||||
SDValue Res = DAG.getLoad(N->getValueType(0), dl, Chain, DataPtr,
|
||||
MachinePointerInfo(),
|
||||
false, false, 0);
|
||||
Results.push_back(Res);
|
||||
|
|
|
@ -213,7 +213,7 @@ namespace {
|
|||
unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
|
||||
SDValue CGPoolOffset =
|
||||
SPU::LowerConstantPool(CPIdx, *CurDAG, TM);
|
||||
|
||||
|
||||
HandleSDNode Dummy(CurDAG->getLoad(vecVT, dl,
|
||||
CurDAG->getEntryNode(), CGPoolOffset,
|
||||
MachinePointerInfo::getConstantPool(),
|
||||
|
@ -308,9 +308,9 @@ namespace {
|
|||
assert(II && "No InstrInfo?");
|
||||
return new SPUHazardRecognizer(*II);
|
||||
}
|
||||
|
||||
|
||||
private:
|
||||
SDValue getRC( MVT );
|
||||
SDValue getRC( MVT );
|
||||
|
||||
// Include the pieces autogenerated from the target description.
|
||||
#include "SPUGenDAGISel.inc"
|
||||
|
@ -512,8 +512,8 @@ SPUDAGToDAGISel::DFormAddressPredicate(SDNode *Op, SDValue N, SDValue &Base,
|
|||
Base = CurDAG->getTargetConstant(0, N.getValueType());
|
||||
Index = N;
|
||||
return true;
|
||||
} else if (Opc == ISD::Register
|
||||
||Opc == ISD::CopyFromReg
|
||||
} else if (Opc == ISD::Register
|
||||
||Opc == ISD::CopyFromReg
|
||||
||Opc == ISD::UNDEF
|
||||
||Opc == ISD::Constant) {
|
||||
unsigned OpOpc = Op->getOpcode();
|
||||
|
@ -574,7 +574,7 @@ SPUDAGToDAGISel::SelectXFormAddr(SDNode *Op, SDValue N, SDValue &Base,
|
|||
}
|
||||
|
||||
/*!
|
||||
Utility function to use with COPY_TO_REGCLASS instructions. Returns a SDValue
|
||||
Utility function to use with COPY_TO_REGCLASS instructions. Returns a SDValue
|
||||
to be used as the last parameter of a
|
||||
CurDAG->getMachineNode(COPY_TO_REGCLASS,..., ) function call
|
||||
\arg VT the value type for which we want a register class
|
||||
|
@ -582,19 +582,19 @@ CurDAG->getMachineNode(COPY_TO_REGCLASS,..., ) function call
|
|||
SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
|
||||
switch( VT.SimpleTy ) {
|
||||
case MVT::i8:
|
||||
return CurDAG->getTargetConstant(SPU::R8CRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
return CurDAG->getTargetConstant(SPU::R8CRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
case MVT::i16:
|
||||
return CurDAG->getTargetConstant(SPU::R16CRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
return CurDAG->getTargetConstant(SPU::R16CRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
case MVT::i32:
|
||||
return CurDAG->getTargetConstant(SPU::R32CRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
return CurDAG->getTargetConstant(SPU::R32CRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
case MVT::f32:
|
||||
return CurDAG->getTargetConstant(SPU::R32FPRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
return CurDAG->getTargetConstant(SPU::R32FPRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
case MVT::i64:
|
||||
return CurDAG->getTargetConstant(SPU::R64CRegClass.getID(), MVT::i32);
|
||||
return CurDAG->getTargetConstant(SPU::R64CRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
case MVT::v16i8:
|
||||
case MVT::v8i16:
|
||||
|
@ -602,7 +602,7 @@ SDValue SPUDAGToDAGISel::getRC( MVT VT ) {
|
|||
case MVT::v4f32:
|
||||
case MVT::v2i64:
|
||||
case MVT::v2f64:
|
||||
return CurDAG->getTargetConstant(SPU::VECREGRegClass.getID(), MVT::i32);
|
||||
return CurDAG->getTargetConstant(SPU::VECREGRegClass.getID(), MVT::i32);
|
||||
break;
|
||||
default:
|
||||
assert( false && "add a new case here" );
|
||||
|
@ -654,7 +654,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
|
|||
EVT Op0VT = Op0.getValueType();
|
||||
EVT Op0VecVT = EVT::getVectorVT(*CurDAG->getContext(),
|
||||
Op0VT, (128 / Op0VT.getSizeInBits()));
|
||||
EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(),
|
||||
EVT OpVecVT = EVT::getVectorVT(*CurDAG->getContext(),
|
||||
OpVT, (128 / OpVT.getSizeInBits()));
|
||||
SDValue shufMask;
|
||||
|
||||
|
@ -688,19 +688,19 @@ SPUDAGToDAGISel::Select(SDNode *N) {
|
|||
}
|
||||
|
||||
SDNode *shufMaskLoad = emitBuildVector(shufMask.getNode());
|
||||
|
||||
|
||||
HandleSDNode PromoteScalar(CurDAG->getNode(SPUISD::PREFSLOT2VEC, dl,
|
||||
Op0VecVT, Op0));
|
||||
|
||||
|
||||
SDValue PromScalar;
|
||||
if (SDNode *N = SelectCode(PromoteScalar.getValue().getNode()))
|
||||
PromScalar = SDValue(N, 0);
|
||||
else
|
||||
PromScalar = PromoteScalar.getValue();
|
||||
|
||||
|
||||
SDValue zextShuffle =
|
||||
CurDAG->getNode(SPUISD::SHUFB, dl, OpVecVT,
|
||||
PromScalar, PromScalar,
|
||||
PromScalar, PromScalar,
|
||||
SDValue(shufMaskLoad, 0));
|
||||
|
||||
HandleSDNode Dummy2(zextShuffle);
|
||||
|
@ -710,7 +710,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
|
|||
zextShuffle = Dummy2.getValue();
|
||||
HandleSDNode Dummy(CurDAG->getNode(SPUISD::VEC2PREFSLOT, dl, OpVT,
|
||||
zextShuffle));
|
||||
|
||||
|
||||
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
|
||||
SelectCode(Dummy.getValue().getNode());
|
||||
return Dummy.getValue().getNode();
|
||||
|
@ -721,7 +721,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
|
|||
HandleSDNode Dummy(CurDAG->getNode(SPUISD::ADD64_MARKER, dl, OpVT,
|
||||
N->getOperand(0), N->getOperand(1),
|
||||
SDValue(CGLoad, 0)));
|
||||
|
||||
|
||||
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
|
||||
if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
|
||||
return N;
|
||||
|
@ -733,7 +733,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
|
|||
HandleSDNode Dummy(CurDAG->getNode(SPUISD::SUB64_MARKER, dl, OpVT,
|
||||
N->getOperand(0), N->getOperand(1),
|
||||
SDValue(CGLoad, 0)));
|
||||
|
||||
|
||||
CurDAG->ReplaceAllUsesWith(N, Dummy.getValue().getNode());
|
||||
if (SDNode *N = SelectCode(Dummy.getValue().getNode()))
|
||||
return N;
|
||||
|
@ -847,12 +847,12 @@ SPUDAGToDAGISel::Select(SDNode *N) {
|
|||
SDValue Arg = N->getOperand(0);
|
||||
SDValue Chain = N->getOperand(1);
|
||||
SDNode *Result;
|
||||
|
||||
|
||||
Result = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, VT,
|
||||
MVT::Other, Arg,
|
||||
getRC( VT.getSimpleVT()), Chain);
|
||||
return Result;
|
||||
|
||||
|
||||
} else if (Opc == SPUISD::IndirectAddr) {
|
||||
// Look at the operands: SelectCode() will catch the cases that aren't
|
||||
// specifically handled here.
|
||||
|
@ -878,10 +878,10 @@ SPUDAGToDAGISel::Select(SDNode *N) {
|
|||
NewOpc = SPU::AIr32;
|
||||
Ops[1] = Op1;
|
||||
} else {
|
||||
Ops[1] = SDValue(CurDAG->getMachineNode(SPU::ILr32, dl,
|
||||
N->getValueType(0),
|
||||
Ops[1] = SDValue(CurDAG->getMachineNode(SPU::ILr32, dl,
|
||||
N->getValueType(0),
|
||||
Op1),
|
||||
0);
|
||||
0);
|
||||
}
|
||||
}
|
||||
Ops[0] = Op0;
|
||||
|
@ -913,7 +913,7 @@ SPUDAGToDAGISel::Select(SDNode *N) {
|
|||
SDNode *
|
||||
SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) {
|
||||
SDValue Op0 = N->getOperand(0);
|
||||
EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
|
||||
EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
|
||||
OpVT, (128 / OpVT.getSizeInBits()));
|
||||
SDValue ShiftAmt = N->getOperand(1);
|
||||
EVT ShiftAmtVT = ShiftAmt.getValueType();
|
||||
|
@ -966,7 +966,7 @@ SPUDAGToDAGISel::SelectSHLi64(SDNode *N, EVT OpVT) {
|
|||
SDValue(Shift, 0), SDValue(Bits, 0));
|
||||
}
|
||||
|
||||
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
OpVT, SDValue(Shift, 0), getRC(MVT::i64));
|
||||
}
|
||||
|
||||
|
@ -1035,7 +1035,7 @@ SPUDAGToDAGISel::SelectSRLi64(SDNode *N, EVT OpVT) {
|
|||
SDValue(Shift, 0), SDValue(Bits, 0));
|
||||
}
|
||||
|
||||
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
OpVT, SDValue(Shift, 0), getRC(MVT::i64));
|
||||
}
|
||||
|
||||
|
@ -1050,14 +1050,14 @@ SPUDAGToDAGISel::SelectSRLi64(SDNode *N, EVT OpVT) {
|
|||
SDNode *
|
||||
SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
|
||||
// Promote Op0 to vector
|
||||
EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
|
||||
EVT VecVT = EVT::getVectorVT(*CurDAG->getContext(),
|
||||
OpVT, (128 / OpVT.getSizeInBits()));
|
||||
SDValue ShiftAmt = N->getOperand(1);
|
||||
EVT ShiftAmtVT = ShiftAmt.getValueType();
|
||||
DebugLoc dl = N->getDebugLoc();
|
||||
|
||||
SDNode *VecOp0 =
|
||||
CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
VecVT, N->getOperand(0), getRC(MVT::v2i64));
|
||||
|
||||
SDValue SignRotAmt = CurDAG->getTargetConstant(31, ShiftAmtVT);
|
||||
|
@ -1065,7 +1065,7 @@ SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
|
|||
CurDAG->getMachineNode(SPU::ROTMAIv2i64_i32, dl, MVT::v2i64,
|
||||
SDValue(VecOp0, 0), SignRotAmt);
|
||||
SDNode *UpperHalfSign =
|
||||
CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
MVT::i32, SDValue(SignRot, 0), getRC(MVT::i32));
|
||||
|
||||
SDNode *UpperHalfSignMask =
|
||||
|
@ -1113,7 +1113,7 @@ SPUDAGToDAGISel::SelectSRAi64(SDNode *N, EVT OpVT) {
|
|||
SDValue(Shift, 0), SDValue(NegShift, 0));
|
||||
}
|
||||
|
||||
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
OpVT, SDValue(Shift, 0), getRC(MVT::i64));
|
||||
}
|
||||
|
||||
|
@ -1135,7 +1135,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
|
|||
// Here's where it gets interesting, because we have to parse out the
|
||||
// subtree handed back in i64vec:
|
||||
|
||||
if (i64vec.getOpcode() == ISD::BIT_CONVERT) {
|
||||
if (i64vec.getOpcode() == ISD::BITCAST) {
|
||||
// The degenerate case where the upper and lower bits in the splat are
|
||||
// identical:
|
||||
SDValue Op0 = i64vec.getOperand(0);
|
||||
|
@ -1149,7 +1149,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
|
|||
SDValue rhs = i64vec.getOperand(1);
|
||||
SDValue shufmask = i64vec.getOperand(2);
|
||||
|
||||
if (lhs.getOpcode() == ISD::BIT_CONVERT) {
|
||||
if (lhs.getOpcode() == ISD::BITCAST) {
|
||||
ReplaceUses(lhs, lhs.getOperand(0));
|
||||
lhs = lhs.getOperand(0);
|
||||
}
|
||||
|
@ -1158,7 +1158,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
|
|||
? lhs.getNode()
|
||||
: emitBuildVector(lhs.getNode()));
|
||||
|
||||
if (rhs.getOpcode() == ISD::BIT_CONVERT) {
|
||||
if (rhs.getOpcode() == ISD::BITCAST) {
|
||||
ReplaceUses(rhs, rhs.getOperand(0));
|
||||
rhs = rhs.getOperand(0);
|
||||
}
|
||||
|
@ -1167,7 +1167,7 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
|
|||
? rhs.getNode()
|
||||
: emitBuildVector(rhs.getNode()));
|
||||
|
||||
if (shufmask.getOpcode() == ISD::BIT_CONVERT) {
|
||||
if (shufmask.getOpcode() == ISD::BITCAST) {
|
||||
ReplaceUses(shufmask, shufmask.getOperand(0));
|
||||
shufmask = shufmask.getOperand(0);
|
||||
}
|
||||
|
@ -1183,8 +1183,8 @@ SDNode *SPUDAGToDAGISel::SelectI64Constant(uint64_t Value64, EVT OpVT,
|
|||
HandleSDNode Dummy(shufNode);
|
||||
SDNode *SN = SelectCode(Dummy.getValue().getNode());
|
||||
if (SN == 0) SN = Dummy.getValue().getNode();
|
||||
|
||||
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
|
||||
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl,
|
||||
OpVT, SDValue(SN, 0), getRC(MVT::i64));
|
||||
} else if (i64vec.getOpcode() == ISD::BUILD_VECTOR) {
|
||||
return CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, dl, OpVT,
|
||||
|
|
|
@ -45,9 +45,9 @@ namespace {
|
|||
// Byte offset of the preferred slot (counted from the MSB)
|
||||
int prefslotOffset(EVT VT) {
|
||||
int retval=0;
|
||||
if (VT==MVT::i1) retval=3;
|
||||
if (VT==MVT::i8) retval=3;
|
||||
if (VT==MVT::i16) retval=2;
|
||||
if (VT==MVT::i1) retval=3;
|
||||
if (VT==MVT::i8) retval=3;
|
||||
if (VT==MVT::i16) retval=2;
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
@ -348,10 +348,10 @@ SPUTargetLowering::SPUTargetLowering(SPUTargetMachine &TM)
|
|||
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
|
||||
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
|
||||
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Legal);
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Legal);
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::i64, Legal);
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::f64, Legal);
|
||||
setOperationAction(ISD::BITCAST, MVT::i32, Legal);
|
||||
setOperationAction(ISD::BITCAST, MVT::f32, Legal);
|
||||
setOperationAction(ISD::BITCAST, MVT::i64, Legal);
|
||||
setOperationAction(ISD::BITCAST, MVT::f64, Legal);
|
||||
|
||||
// We cannot sextinreg(i1). Expand to shifts.
|
||||
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
|
||||
|
@ -550,13 +550,13 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
|
|||
(128 / InVT.getSizeInBits()));
|
||||
|
||||
// two sanity checks
|
||||
assert( LN->getAddressingMode() == ISD::UNINDEXED
|
||||
assert( LN->getAddressingMode() == ISD::UNINDEXED
|
||||
&& "we should get only UNINDEXED adresses");
|
||||
// clean aligned loads can be selected as-is
|
||||
if (InVT.getSizeInBits() == 128 && alignment == 16)
|
||||
return SDValue();
|
||||
|
||||
// Get pointerinfos to the memory chunk(s) that contain the data to load
|
||||
// Get pointerinfos to the memory chunk(s) that contain the data to load
|
||||
uint64_t mpi_offset = LN->getPointerInfo().Offset;
|
||||
mpi_offset -= mpi_offset%16;
|
||||
MachinePointerInfo lowMemPtr(LN->getPointerInfo().V, mpi_offset);
|
||||
|
@ -649,7 +649,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
|
|||
SDValue low = DAG.getLoad(MVT::i128, dl, the_chain, basePtr,
|
||||
lowMemPtr,
|
||||
LN->isVolatile(), LN->isNonTemporal(), 16);
|
||||
|
||||
|
||||
// When the size is not greater than alignment we get all data with just
|
||||
// one load
|
||||
if (alignment >= InVT.getSizeInBits()/8) {
|
||||
|
@ -662,30 +662,30 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
|
|||
|
||||
// Convert the loaded v16i8 vector to the appropriate vector type
|
||||
// specified by the operand:
|
||||
EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
|
||||
EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
|
||||
InVT, (128 / InVT.getSizeInBits()));
|
||||
result = DAG.getNode(SPUISD::VEC2PREFSLOT, dl, InVT,
|
||||
DAG.getNode(ISD::BIT_CONVERT, dl, vecVT, result));
|
||||
DAG.getNode(ISD::BITCAST, dl, vecVT, result));
|
||||
}
|
||||
// When alignment is less than the size, we might need (known only at
|
||||
// run-time) two loads
|
||||
// TODO: if the memory address is composed only from constants, we have
|
||||
// TODO: if the memory address is composed only from constants, we have
|
||||
// extra kowledge, and might avoid the second load
|
||||
else {
|
||||
// storage position offset from lower 16 byte aligned memory chunk
|
||||
SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
|
||||
SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
|
||||
basePtr, DAG.getConstant( 0xf, MVT::i32 ) );
|
||||
// 16 - offset
|
||||
SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
|
||||
SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
|
||||
DAG.getConstant( 16, MVT::i32),
|
||||
offset );
|
||||
// get a registerfull of ones. (this implementation is a workaround: LLVM
|
||||
// get a registerfull of ones. (this implementation is a workaround: LLVM
|
||||
// cannot handle 128 bit signed int constants)
|
||||
SDValue ones = DAG.getConstant(-1, MVT::v4i32 );
|
||||
ones = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, ones);
|
||||
ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
|
||||
|
||||
SDValue high = DAG.getLoad(MVT::i128, dl, the_chain,
|
||||
DAG.getNode(ISD::ADD, dl, PtrVT,
|
||||
DAG.getNode(ISD::ADD, dl, PtrVT,
|
||||
basePtr,
|
||||
DAG.getConstant(16, PtrVT)),
|
||||
highMemPtr,
|
||||
|
@ -695,20 +695,20 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
|
|||
high.getValue(1));
|
||||
|
||||
// Shift the (possible) high part right to compensate the misalignemnt.
|
||||
// if there is no highpart (i.e. value is i64 and offset is 4), this
|
||||
// if there is no highpart (i.e. value is i64 and offset is 4), this
|
||||
// will zero out the high value.
|
||||
high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
|
||||
high = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, high,
|
||||
DAG.getNode(ISD::SUB, dl, MVT::i32,
|
||||
DAG.getConstant( 16, MVT::i32),
|
||||
offset
|
||||
));
|
||||
|
||||
|
||||
// Shift the low similarily
|
||||
// TODO: add SPUISD::SHL_BYTES
|
||||
low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset );
|
||||
|
||||
// Merge the two parts
|
||||
result = DAG.getNode(ISD::BIT_CONVERT, dl, vecVT,
|
||||
result = DAG.getNode(ISD::BITCAST, dl, vecVT,
|
||||
DAG.getNode(ISD::OR, dl, MVT::i128, low, high));
|
||||
|
||||
if (!InVT.isVector()) {
|
||||
|
@ -759,7 +759,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
|
|||
SDValue result;
|
||||
EVT vecVT = StVT.isVector()? StVT: EVT::getVectorVT(*DAG.getContext(), StVT,
|
||||
(128 / StVT.getSizeInBits()));
|
||||
// Get pointerinfos to the memory chunk(s) that contain the data to load
|
||||
// Get pointerinfos to the memory chunk(s) that contain the data to load
|
||||
uint64_t mpi_offset = SN->getPointerInfo().Offset;
|
||||
mpi_offset -= mpi_offset%16;
|
||||
MachinePointerInfo lowMemPtr(SN->getPointerInfo().V, mpi_offset);
|
||||
|
@ -767,7 +767,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
|
|||
|
||||
|
||||
// two sanity checks
|
||||
assert( SN->getAddressingMode() == ISD::UNINDEXED
|
||||
assert( SN->getAddressingMode() == ISD::UNINDEXED
|
||||
&& "we should get only UNINDEXED adresses");
|
||||
// clean aligned loads can be selected as-is
|
||||
if (StVT.getSizeInBits() == 128 && alignment == 16)
|
||||
|
@ -876,12 +876,12 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
|
|||
|
||||
SDValue insertEltOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, vecVT,
|
||||
insertEltOffs);
|
||||
SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
|
||||
SDValue vectorizeOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, vecVT,
|
||||
theValue);
|
||||
|
||||
result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
|
||||
vectorizeOp, low,
|
||||
DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
DAG.getNode(ISD::BITCAST, dl,
|
||||
MVT::v4i32, insertEltOp));
|
||||
|
||||
result = DAG.getStore(the_chain, dl, result, basePtr,
|
||||
|
@ -892,59 +892,59 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
|
|||
}
|
||||
// do the store when it might cross the 16 byte memory access boundary.
|
||||
else {
|
||||
// TODO issue a warning if SN->isVolatile()== true? This is likely not
|
||||
// TODO issue a warning if SN->isVolatile()== true? This is likely not
|
||||
// what the user wanted.
|
||||
|
||||
|
||||
// address offset from nearest lower 16byte alinged address
|
||||
SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
|
||||
SN->getBasePtr(),
|
||||
SDValue offset = DAG.getNode(ISD::AND, dl, MVT::i32,
|
||||
SN->getBasePtr(),
|
||||
DAG.getConstant(0xf, MVT::i32));
|
||||
// 16 - offset
|
||||
SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
|
||||
SDValue offset_compl = DAG.getNode(ISD::SUB, dl, MVT::i32,
|
||||
DAG.getConstant( 16, MVT::i32),
|
||||
offset);
|
||||
SDValue hi_shift = DAG.getNode(ISD::SUB, dl, MVT::i32,
|
||||
SDValue hi_shift = DAG.getNode(ISD::SUB, dl, MVT::i32,
|
||||
DAG.getConstant( VT.getSizeInBits()/8,
|
||||
MVT::i32),
|
||||
offset_compl);
|
||||
// 16 - sizeof(Value)
|
||||
SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
|
||||
SDValue surplus = DAG.getNode(ISD::SUB, dl, MVT::i32,
|
||||
DAG.getConstant( 16, MVT::i32),
|
||||
DAG.getConstant( VT.getSizeInBits()/8,
|
||||
MVT::i32));
|
||||
// get a registerfull of ones
|
||||
// get a registerfull of ones
|
||||
SDValue ones = DAG.getConstant(-1, MVT::v4i32);
|
||||
ones = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, ones);
|
||||
ones = DAG.getNode(ISD::BITCAST, dl, MVT::i128, ones);
|
||||
|
||||
// Create the 128 bit masks that have ones where the data to store is
|
||||
// located.
|
||||
SDValue lowmask, himask;
|
||||
// if the value to store don't fill up the an entire 128 bits, zero
|
||||
SDValue lowmask, himask;
|
||||
// if the value to store don't fill up the an entire 128 bits, zero
|
||||
// out the last bits of the mask so that only the value we want to store
|
||||
// is masked.
|
||||
// is masked.
|
||||
// this is e.g. in the case of store i32, align 2
|
||||
if (!VT.isVector()){
|
||||
Value = DAG.getNode(SPUISD::PREFSLOT2VEC, dl, vecVT, Value);
|
||||
lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, ones, surplus);
|
||||
lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
|
||||
lowmask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
|
||||
surplus);
|
||||
Value = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, Value);
|
||||
Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
|
||||
Value = DAG.getNode(ISD::AND, dl, MVT::i128, Value, lowmask);
|
||||
|
||||
|
||||
}
|
||||
else {
|
||||
lowmask = ones;
|
||||
Value = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, Value);
|
||||
Value = DAG.getNode(ISD::BITCAST, dl, MVT::i128, Value);
|
||||
}
|
||||
// this will zero, if there are no data that goes to the high quad
|
||||
himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
|
||||
// this will zero, if there are no data that goes to the high quad
|
||||
himask = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, lowmask,
|
||||
offset_compl);
|
||||
lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
|
||||
lowmask = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, lowmask,
|
||||
offset);
|
||||
|
||||
|
||||
// Load in the old data and zero out the parts that will be overwritten with
|
||||
// the new data to store.
|
||||
SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
|
||||
SDValue hi = DAG.getLoad(MVT::i128, dl, the_chain,
|
||||
DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
|
||||
DAG.getConstant( 16, PtrVT)),
|
||||
highMemPtr,
|
||||
|
@@ -952,40 +952,40 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
the_chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(1),
hi.getValue(1));

low = DAG.getNode(ISD::AND, dl, MVT::i128,
DAG.getNode( ISD::BIT_CONVERT, dl, MVT::i128, low),
DAG.getNode( ISD::BITCAST, dl, MVT::i128, low),
DAG.getNode( ISD::XOR, dl, MVT::i128, lowmask, ones));
hi = DAG.getNode(ISD::AND, dl, MVT::i128,
DAG.getNode( ISD::BIT_CONVERT, dl, MVT::i128, hi),
DAG.getNode( ISD::BITCAST, dl, MVT::i128, hi),
DAG.getNode( ISD::XOR, dl, MVT::i128, himask, ones));

// Shift the Value to store into place. rlow contains the parts that go to
// the lower memory chunk, rhi has the parts that go to the upper one.
SDValue rlow = DAG.getNode(SPUISD::SRL_BYTES, dl, MVT::i128, Value, offset);
rlow = DAG.getNode(ISD::AND, dl, MVT::i128, rlow, lowmask);
SDValue rhi = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, Value,
offset_compl);

// Merge the old data and the new data and store the results
// Need to convert vectors here to integer as 'OR'ing floats assert
rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, low),
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, rlow));
rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, hi),
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, rhi));
// Need to convert vectors here to integer as 'OR'ing floats assert
rlow = DAG.getNode(ISD::OR, dl, MVT::i128,
DAG.getNode(ISD::BITCAST, dl, MVT::i128, low),
DAG.getNode(ISD::BITCAST, dl, MVT::i128, rlow));
rhi = DAG.getNode(ISD::OR, dl, MVT::i128,
DAG.getNode(ISD::BITCAST, dl, MVT::i128, hi),
DAG.getNode(ISD::BITCAST, dl, MVT::i128, rhi));

low = DAG.getStore(the_chain, dl, rlow, basePtr,
lowMemPtr,
SN->isVolatile(), SN->isNonTemporal(), 16);
hi = DAG.getStore(the_chain, dl, rhi,
DAG.getNode(ISD::ADD, dl, PtrVT, basePtr,
DAG.getConstant( 16, PtrVT)),
highMemPtr,
SN->isVolatile(), SN->isNonTemporal(), 16);
result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, low.getValue(0),
hi.getValue(0));
}
}

return result;
}
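The hunks above show the shape of the whole change: each site that builds a bit-reinterpreting node keeps its operands and result type and only switches the opcode name. A minimal before/after sketch (the function names, Op and dl below are hypothetical, used only to frame the call; the getNode form is the one used throughout this diff):

// Before this commit: reinterpret an f32 value as i32 without changing its bits.
SDValue LowerExampleOld(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
}

// After this commit: same node, same semantics, new opcode name.
SDValue LowerExampleNew(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
}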
@@ -1095,7 +1095,7 @@ LowerConstantFP(SDValue Op, SelectionDAG &DAG) {
SDValue T = DAG.getConstant(dbits, MVT::i64);
SDValue Tvec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T);
return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Tvec));
DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Tvec));
}

return SDValue();
@@ -1194,8 +1194,8 @@ SPUTargetLowering::LowerFormalArguments(SDValue Chain,

// vararg handling:
if (isVarArg) {
// FIXME: we should be able to query the argument registers from
// tablegen generated code.
static const unsigned ArgRegs[] = {
SPU::R3, SPU::R4, SPU::R5, SPU::R6, SPU::R7, SPU::R8, SPU::R9,
SPU::R10, SPU::R11, SPU::R12, SPU::R13, SPU::R14, SPU::R15, SPU::R16,
@@ -1270,10 +1270,10 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,

SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CallConv, isVarArg, getTargetMachine(), ArgLocs,
*DAG.getContext());
// FIXME: allow for other calling conventions
CCInfo.AnalyzeCallOperands(Outs, CCC_SPU);

const unsigned NumArgRegs = ArgLocs.size();

@@ -1438,7 +1438,7 @@ SPUTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// If the call has results, copy the values out of the ret val registers.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
CCValAssign VA = RVLocs[i];

SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
InFlag);
Chain = Val.getValue(1);
@@ -1671,7 +1671,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
&& "LowerBUILD_VECTOR: Unexpected floating point vector element.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(Value32, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,
return DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, T,T,T,T));
break;
}
@@ -1681,7 +1681,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
&& "LowerBUILD_VECTOR: 64-bit float vector size > 8 bytes.");
// NOTE: pretend the constant is an integer. LLVM won't load FP constants
SDValue T = DAG.getConstant(f64val, MVT::i64);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64,
return DAG.getNode(ISD::BITCAST, dl, MVT::v2f64,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, T, T));
break;
}
@@ -1691,7 +1691,7 @@ LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) {
SmallVector<SDValue, 8> Ops;

Ops.assign(8, DAG.getConstant(Value16, MVT::i16));
return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i16, &Ops[0], Ops.size()));
}
case MVT::v8i16: {
@@ -1725,7 +1725,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
if (upper == lower) {
// Magic constant that can be matched by IL, ILA, et. al.
SDValue Val = DAG.getTargetConstant(upper, MVT::i32);
return DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
return DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
Val, Val, Val, Val));
} else {
@@ -1754,7 +1754,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Create lower vector if not a special pattern
if (!lower_special) {
SDValue LO32C = DAG.getConstant(lower, MVT::i32);
LO32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
LO32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
LO32C, LO32C, LO32C, LO32C));
}
@@ -1762,7 +1762,7 @@ SPU::LowerV2I64Splat(EVT OpVT, SelectionDAG& DAG, uint64_t SplatVal,
// Create upper vector if not a special pattern
if (!upper_special) {
SDValue HI32C = DAG.getConstant(upper, MVT::i32);
HI32 = DAG.getNode(ISD::BIT_CONVERT, dl, OpVT,
HI32 = DAG.getNode(ISD::BITCAST, dl, OpVT,
DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
HI32C, HI32C, HI32C, HI32C));
}
@@ -1846,7 +1846,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {

if (EltVT == MVT::i8) {
V2EltIdx0 = 16;
maskVT = MVT::v16i8;
} else if (EltVT == MVT::i16) {
V2EltIdx0 = 8;
maskVT = MVT::v8i16;
@@ -1862,7 +1862,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
for (unsigned i = 0; i != MaxElts; ++i) {
if (SVN->getMaskElt(i) < 0)
continue;

unsigned SrcElt = SVN->getMaskElt(i);

if (monotonic) {
@@ -1909,7 +1909,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
SDValue Pointer = DAG.getNode(SPUISD::IndirectAddr, dl, PtrVT,
DAG.getRegister(SPU::R1, PtrVT),
DAG.getConstant(V2EltOffset, MVT::i32));
SDValue ShufMaskOp = DAG.getNode(SPUISD::SHUFFLE_MASK, dl,
maskVT, Pointer);

// Use shuffle mask in SHUFB synthetic instruction:
@@ -2173,7 +2173,7 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DAG.getRegister(SPU::R1, PtrVT),
DAG.getConstant(Offset, PtrVT));
// widen the mask when dealing with half vectors
EVT maskVT = EVT::getVectorVT(*(DAG.getContext()), VT.getVectorElementType(),
128/ VT.getVectorElementType().getSizeInBits());
SDValue ShufMask = DAG.getNode(SPUISD::SHUFFLE_MASK, dl, maskVT, Pointer);

@@ -2181,7 +2181,7 @@ static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
DAG.getNode(SPUISD::SHUFB, dl, VT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, ValOp),
VecOp,
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, ShufMask));
DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, ShufMask));

return result;
}
@@ -2301,12 +2301,12 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
ConstVec = Op.getOperand(0);
Arg = Op.getOperand(1);
if (ConstVec.getNode()->getOpcode() != ISD::BUILD_VECTOR) {
if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
ConstVec = ConstVec.getOperand(0);
} else {
ConstVec = Op.getOperand(1);
Arg = Op.getOperand(0);
if (ConstVec.getNode()->getOpcode() == ISD::BIT_CONVERT) {
if (ConstVec.getNode()->getOpcode() == ISD::BITCAST) {
ConstVec = ConstVec.getOperand(0);
}
}
@@ -2347,7 +2347,7 @@ LowerByteImmed(SDValue Op, SelectionDAG &DAG) {
*/
static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
EVT VT = Op.getValueType();
EVT vecVT = EVT::getVectorVT(*DAG.getContext(),
VT, (128 / VT.getSizeInBits()));
DebugLoc dl = Op.getDebugLoc();

@@ -2523,7 +2523,7 @@ static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,

// Take advantage of the fact that (truncate (sra arg, 32)) is efficiently
// selected to a NOP:
SDValue i64lhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, lhs);
SDValue i64lhs = DAG.getNode(ISD::BITCAST, dl, IntVT, lhs);
SDValue lhsHi32 =
DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
DAG.getNode(ISD::SRL, dl, IntVT,
@@ -2557,7 +2557,7 @@ static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
ISD::SETGT));
}

SDValue i64rhs = DAG.getNode(ISD::BIT_CONVERT, dl, IntVT, rhs);
SDValue i64rhs = DAG.getNode(ISD::BITCAST, dl, IntVT, rhs);
SDValue rhsHi32 =
DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
DAG.getNode(ISD::SRL, dl, IntVT,
@@ -2671,7 +2671,7 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
// Type to truncate to
EVT VT = Op.getValueType();
MVT simpleVT = VT.getSimpleVT();
EVT VecVT = EVT::getVectorVT(*DAG.getContext(),
VT, (128 / VT.getSizeInBits()));
DebugLoc dl = Op.getDebugLoc();

@@ -2745,16 +2745,16 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG)
DAG.getConstant(31, MVT::i32));

// reinterpret as a i128 (SHUFB requires it). This gets lowered away.
SDValue extended = SDValue(DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
dl, Op0VT, Op0,
DAG.getTargetConstant(
SPU::GPRCRegClass.getID(),
MVT::i32)), 0);
// Shuffle bytes - Copy the sign bits into the upper 64 bits
// and the input value into the lower 64 bits.
SDValue extShuffle = DAG.getNode(SPUISD::SHUFB, dl, mvt,
extended, sraVal, shufMask);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i128, extShuffle);
return DAG.getNode(ISD::BITCAST, dl, MVT::i128, extShuffle);
}

//! Custom (target-specific) lowering entry point
@@ -3234,14 +3234,14 @@ bool SPUTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
return isInt<10>(Imm);
}

bool
SPUTargetLowering::isLegalAddressingMode(const AddrMode &AM,
const Type * ) const{

// A-form: 18bit absolute address.
if (AM.BaseGV && !AM.HasBaseReg && AM.Scale == 0 && AM.BaseOffs == 0)
return true;

// D-form: reg + 14bit offset
if (AM.BaseGV ==0 && AM.HasBaseReg && AM.Scale == 0 && isInt<14>(AM.BaseOffs))
return true;
@@ -116,8 +116,8 @@ MBlazeTargetLowering::MBlazeTargetLowering(MBlazeTargetMachine &TM)
}

// Expand unsupported conversions
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
setOperationAction(ISD::BITCAST, MVT::f32, Expand);
setOperationAction(ISD::BITCAST, MVT::i32, Expand);

// Expand SELECT_CC
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
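The hunk above is how a backend that has no native bit-reinterpretation instruction declares that fact to the legalizer; after this commit the declarations simply use the new opcode name. A condensed sketch of the pattern, assuming a hypothetical target "Foo" (only the setOperationAction lines are taken from this diff; the constructor framing is illustrative):

// Marking BITCAST as Expand asks the legalizer to open-code the
// reinterpretation instead of expecting a native instruction for it.
FooTargetLowering::FooTargetLowering(FooTargetMachine &TM)
  : TargetLowering(TM, new TargetLoweringObjectFileELF()) {
  setOperationAction(ISD::BITCAST, MVT::f32, Expand);
  setOperationAction(ISD::BITCAST, MVT::i32, Expand);
}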
@@ -926,8 +926,8 @@ MBlazeTargetLowering::getSingleConstraintMatchWeight(
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
break;
case 'd':
case 'y':
if (type->isIntegerTy())
weight = CW_Register;
break;
@@ -57,7 +57,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
Subtarget = &TM.getSubtarget<MipsSubtarget>();

// Mips does not have i1 type, so use i32 for
// setcc operations results (slt, sgt, ...).
setBooleanContents(ZeroOrOneBooleanContent);

// Set up the register classes
@@ -69,7 +69,7 @@ MipsTargetLowering(MipsTargetMachine &TM)
if (!Subtarget->isFP64bit())
addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);

// Load extented operations for i1 types must be promoted
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
@@ -78,9 +78,9 @@ MipsTargetLowering(MipsTargetMachine &TM)
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);

// Used by legalize types to correctly generate the setcc result.
// Without this, every float setcc comes with a AND/OR with the result,
// we don't want this, since the fpcmp result goes to a flag register,
// which is used implicitly by brcond and select operations.
AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);

@@ -100,8 +100,8 @@ MipsTargetLowering(MipsTargetMachine &TM)
setOperationAction(ISD::VASTART, MVT::Other, Custom);

// We custom lower AND/OR to handle the case where the DAG contain 'ands/ors'
// with operands comming from setcc fp comparions. This is necessary since
// the result from these setcc are in a flag registers (FCR31).
setOperationAction(ISD::AND, MVT::i32, Custom);
setOperationAction(ISD::OR, MVT::i32, Custom);
@@ -168,7 +168,7 @@ unsigned MipsTargetLowering::getFunctionAlignment(const Function *) const {
SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
switch (Op.getOpcode())
{
case ISD::AND: return LowerANDOR(Op, DAG);
case ISD::BRCOND: return LowerBRCOND(Op, DAG);
@@ -194,7 +194,7 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) const
// MachineFunction as a live in value. It also creates a corresponding
// virtual register for it.
static unsigned
AddLiveIn(MachineFunction &MF, unsigned PReg, TargetRegisterClass *RC)
{
assert(RC->contains(PReg) && "Not the correct regclass!");
unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
@@ -212,7 +212,7 @@ static Mips::FPBranchCode GetFPBranchCodeFromCond(Mips::CondCode CC) {

return Mips::BRANCH_INVALID;
}

static unsigned FPBranchCodeToOpc(Mips::FPBranchCode BC) {
switch(BC) {
default:
@@ -227,24 +227,24 @@ static unsigned FPBranchCodeToOpc(Mips::FPBranchCode BC) {
static Mips::CondCode FPCondCCodeToFCC(ISD::CondCode CC) {
switch (CC) {
default: llvm_unreachable("Unknown fp condition code!");
case ISD::SETEQ:
case ISD::SETOEQ: return Mips::FCOND_EQ;
case ISD::SETUNE: return Mips::FCOND_OGL;
case ISD::SETLT:
case ISD::SETOLT: return Mips::FCOND_OLT;
case ISD::SETGT:
case ISD::SETOGT: return Mips::FCOND_OGT;
case ISD::SETLE:
case ISD::SETOLE: return Mips::FCOND_OLE;
case ISD::SETGE:
case ISD::SETOGE: return Mips::FCOND_OGE;
case ISD::SETULT: return Mips::FCOND_ULT;
case ISD::SETULE: return Mips::FCOND_ULE;
case ISD::SETUGT: return Mips::FCOND_UGT;
case ISD::SETUGE: return Mips::FCOND_UGE;
case ISD::SETUO: return Mips::FCOND_UN;
case ISD::SETO: return Mips::FCOND_OR;
case ISD::SETNE:
case ISD::SETONE: return Mips::FCOND_NEQ;
case ISD::SETUEQ: return Mips::FCOND_UEQ;
}
@@ -364,7 +364,7 @@ LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const
// Emit the round instruction and bit convert to integer
SDValue Trunc = DAG.getNode(MipsISD::FPRound, dl, MVT::f32,
Src, CondReg.getValue(1));
SDValue BitCvt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Trunc);
SDValue BitCvt = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Trunc);
return BitCvt;
}

@@ -382,11 +382,11 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
// obtain the new stack size.
SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);

// The Sub result contains the new stack start address, so it
// must be placed in the stack pointer register.
Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, Mips::SP, Sub);

// This node always has two return values: a new stack pointer
// value and a chain
SDValue Ops[2] = { Sub, Chain };
return DAG.getMergeValues(Ops, 2, dl);
@@ -405,9 +405,9 @@ LowerANDOR(SDValue Op, SelectionDAG &DAG) const
SDValue True = DAG.getConstant(1, MVT::i32);
SDValue False = DAG.getConstant(0, MVT::i32);

SDValue LSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
LHS, True, False, LHS.getOperand(2));
SDValue RSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
RHS, True, False, RHS.getOperand(2));

return DAG.getNode(Op.getOpcode(), dl, MVT::i32, LSEL, RSEL);
@@ -416,7 +416,7 @@ LowerANDOR(SDValue Op, SelectionDAG &DAG) const
SDValue MipsTargetLowering::
LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
{
// The first operand is the chain, the second is the condition, the third is
// the block to branch to if the condition is true.
SDValue Chain = Op.getOperand(0);
SDValue Dest = Op.getOperand(2);
@@ -424,55 +424,55 @@ LowerBRCOND(SDValue Op, SelectionDAG &DAG) const

if (Op.getOperand(1).getOpcode() != MipsISD::FPCmp)
return Op;

SDValue CondRes = Op.getOperand(1);
SDValue CCNode = CondRes.getOperand(2);
Mips::CondCode CC =
(Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), MVT::i32);

return DAG.getNode(MipsISD::FPBrcond, dl, Op.getValueType(), Chain, BrCode,
Dest, CondRes);
}

SDValue MipsTargetLowering::
LowerSETCC(SDValue Op, SelectionDAG &DAG) const
{
// The operands to this are the left and right operands to compare (ops #0,
// and #1) and the condition code to compare them with (op #2) as a
// CondCodeSDNode.
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
DebugLoc dl = Op.getDebugLoc();

ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

return DAG.getNode(MipsISD::FPCmp, dl, Op.getValueType(), LHS, RHS,
DAG.getConstant(FPCondCCodeToFCC(CC), MVT::i32));
}

SDValue MipsTargetLowering::
LowerSELECT(SDValue Op, SelectionDAG &DAG) const
{
SDValue Cond = Op.getOperand(0);
SDValue True = Op.getOperand(1);
SDValue False = Op.getOperand(2);
DebugLoc dl = Op.getDebugLoc();

// if the incomming condition comes from a integer compare, the select
// operation must be SelectCC or a conditional move if the subtarget
// supports it.
if (Cond.getOpcode() != MipsISD::FPCmp) {
if (Subtarget->hasCondMov() && !True.getValueType().isFloatingPoint())
return Op;
return DAG.getNode(MipsISD::SelectCC, dl, True.getValueType(),
Cond, True, False);
}

// if the incomming condition comes from fpcmp, the select
// operation must use FPSelectCC.
SDValue CCNode = Cond.getOperand(2);
return DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
Cond, True, False, CCNode);
}

@@ -484,16 +484,16 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,

if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
SDVTList VTs = DAG.getVTList(MVT::i32);

MipsTargetObjectFile &TLOF = (MipsTargetObjectFile&)getObjFileLowering();

// %gp_rel relocation
if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_GPREL);
SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, dl, VTs, &GA, 1);
SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
return DAG.getNode(ISD::ADD, dl, MVT::i32, GOT, GPRelNode);
}
// %hi/%lo relocation
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
@@ -505,7 +505,7 @@ SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op,
} else {
SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0,
MipsII::MO_GOT);
SDValue ResNode = DAG.getLoad(MVT::i32, dl,
DAG.getEntryNode(), GA, MachinePointerInfo(),
false, false, 0);
// On functions and global targets not internal linked only
@@ -531,7 +531,7 @@ SDValue MipsTargetLowering::
LowerJumpTable(SDValue Op, SelectionDAG &DAG) const
{
SDValue ResNode;
SDValue HiPart;
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
@@ -566,25 +566,25 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
DebugLoc dl = Op.getDebugLoc();

// gp_rel relocation
// FIXME: we should reference the constant pool using small data sections,
// but the asm printer currently doens't support this feature without
// hacking it. This feature should come soon so we can uncomment the
// stuff below.
//if (IsInSmallSection(C->getType())) {
// SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
// SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
// ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);

if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
N->getOffset(), MipsII::MO_ABS_HILO);
SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, CP);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP);
ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
} else {
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(),
N->getOffset(), MipsII::MO_GOT);
SDValue Load = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(),
CP, MachinePointerInfo::getConstantPool(),
false, false, 0);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP);
@@ -617,14 +617,14 @@ SDValue MipsTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
#include "MipsGenCallingConv.inc"

//===----------------------------------------------------------------------===//
// TODO: Implement a generic logic using tblgen that can support this.
// Mips O32 ABI rules:
// ---
// i32 - Passed in A0, A1, A2, A3 and stack
// f32 - Only passed in f32 registers if no int reg has been used yet to hold
// an argument. Otherwise, passed in A1, A2, A3 and stack.
// f64 - Only passed in two aliased f32 registers if no int reg has been used
// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
// not used, it must be shadowed. If only A3 is avaiable, shadow it and
// go to stack.
//===----------------------------------------------------------------------===//
@@ -633,7 +633,7 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
MVT LocVT, CCValAssign::LocInfo LocInfo,
ISD::ArgFlagsTy ArgFlags, CCState &State) {

static const unsigned IntRegsSize=4, FloatRegsSize=2;

static const unsigned IntRegs[] = {
Mips::A0, Mips::A1, Mips::A2, Mips::A3
@@ -681,7 +681,7 @@ static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
Reg = Mips::A2;
for (;UnallocIntReg < IntRegsSize; ++UnallocIntReg)
State.AllocateReg(UnallocIntReg);
}
}
LocVT = MVT::i32;
}

@@ -739,7 +739,7 @@ static bool CC_MipsO32_VarArgs(unsigned ValNo, MVT ValVT,
IntRegs[UnallocIntReg] == (unsigned (Mips::A2))) {
unsigned Reg = State.AllocateReg(IntRegs, IntRegsSize);
State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, MVT::i32, LocInfo));
// Shadow the next register so it can be used
// later to get the other 32bit part.
State.AllocateReg(IntRegs, IntRegsSize);
return false;
@@ -791,11 +791,11 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (Subtarget->isABI_O32()) {
int VTsize = MVT(MVT::i32).getSizeInBits()/8;
MFI->CreateFixedObject(VTsize, (VTsize*3), true);
CCInfo.AnalyzeCallOperands(Outs,
isVarArg ? CC_MipsO32_VarArgs : CC_MipsO32);
} else
CCInfo.AnalyzeCallOperands(Outs, CC_Mips);

// Get a count of how many bytes are to be pushed on the stack.
unsigned NumBytes = CCInfo.getNextStackOffset();
Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true));
@@ -804,7 +804,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
SmallVector<SDValue, 8> MemOpChains;

// First/LastArgStackLoc contains the first/last
// "at stack" argument location.
int LastArgStackLoc = 0;
unsigned FirstStackArgLoc = (Subtarget->isABI_EABI() ? 0 : 16);
@@ -817,12 +817,12 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Promote the value if needed.
switch (VA.getLocInfo()) {
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full:
if (Subtarget->isABI_O32() && VA.isRegLoc()) {
if (VA.getValVT() == MVT::f32 && VA.getLocVT() == MVT::i32)
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Arg);
Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
if (VA.getValVT() == MVT::f64 && VA.getLocVT() == MVT::i32) {
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
DAG.getConstant(0, getPointerTy()));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
@@ -830,7 +830,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
RegsToPass.push_back(std::make_pair(VA.getLocReg()+1, Hi));
continue;
}
}
}
break;
case CCValAssign::SExt:
|
|||
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
|
||||
break;
|
||||
}
|
||||
|
||||
// Arguments that can be passed on register must be kept at
|
||||
|
||||
// Arguments that can be passed on register must be kept at
|
||||
// RegsToPass vector
|
||||
if (VA.isRegLoc()) {
|
||||
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
// Register can't get to this point...
|
||||
assert(VA.isMemLoc());
|
||||
|
||||
|
||||
// Create the frame index object for this incoming parameter
|
||||
// This guarantees that when allocating Local Area the firsts
|
||||
// 16 bytes which are alwayes reserved won't be overwritten
|
||||
|
@ -864,7 +864,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
|
||||
SDValue PtrOff = DAG.getFrameIndex(FI,getPointerTy());
|
||||
|
||||
// emit ISD::STORE whichs stores the
|
||||
// emit ISD::STORE whichs stores the
|
||||
// parameter value to a stack Location
|
||||
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
|
||||
MachinePointerInfo(),
|
||||
|
@ -873,34 +873,34 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
|
||||
// Transform all store nodes into one single node because all store
|
||||
// nodes are independent of each other.
|
||||
if (!MemOpChains.empty())
|
||||
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
|
||||
if (!MemOpChains.empty())
|
||||
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
|
||||
&MemOpChains[0], MemOpChains.size());
|
||||
|
||||
// Build a sequence of copy-to-reg nodes chained together with token
|
||||
// Build a sequence of copy-to-reg nodes chained together with token
|
||||
// chain and flag operands which copy the outgoing args into registers.
|
||||
// The InFlag in necessary since all emited instructions must be
|
||||
// stuck together.
|
||||
SDValue InFlag;
|
||||
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
|
||||
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
|
||||
Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
|
||||
RegsToPass[i].second, InFlag);
|
||||
InFlag = Chain.getValue(1);
|
||||
}
|
||||
|
||||
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
|
||||
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
|
||||
// node so that legalize doesn't hack it.
|
||||
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
|
||||
// node so that legalize doesn't hack it.
|
||||
unsigned char OpFlag = IsPIC ? MipsII::MO_GOT_CALL : MipsII::MO_NO_FLAG;
|
||||
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
|
||||
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
|
||||
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
|
||||
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
|
||||
getPointerTy(), 0, OpFlag);
|
||||
else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
|
||||
Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
|
||||
Callee = DAG.getTargetExternalSymbol(S->getSymbol(),
|
||||
getPointerTy(), OpFlag);
|
||||
|
||||
// MipsJmpLink = #chain, #target_address, #opt_in_flags...
|
||||
// = Chain, Callee, Reg#1, Reg#2, ...
|
||||
// = Chain, Callee, Reg#1, Reg#2, ...
|
||||
//
|
||||
// Returns a chain & a flag for retval copy to use.
|
||||
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
|
||||
|
@ -908,7 +908,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
Ops.push_back(Chain);
|
||||
Ops.push_back(Callee);
|
||||
|
||||
// Add argument registers to the end of the list so that they are
|
||||
// Add argument registers to the end of the list so that they are
|
||||
// known live into the call.
|
||||
for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
|
||||
Ops.push_back(DAG.getRegister(RegsToPass[i].first,
|
||||
|
@ -920,17 +920,17 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
Chain = DAG.getNode(MipsISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size());
|
||||
InFlag = Chain.getValue(1);
|
||||
|
||||
// Create a stack location to hold GP when PIC is used. This stack
|
||||
// location is used on function prologue to save GP and also after all
|
||||
// emited CALL's to restore GP.
|
||||
// Create a stack location to hold GP when PIC is used. This stack
|
||||
// location is used on function prologue to save GP and also after all
|
||||
// emited CALL's to restore GP.
|
||||
if (IsPIC) {
|
||||
// Function can have an arbitrary number of calls, so
|
||||
// Function can have an arbitrary number of calls, so
|
||||
// hold the LastArgStackLoc with the biggest offset.
|
||||
int FI;
|
||||
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
|
||||
if (LastArgStackLoc >= MipsFI->getGPStackOffset()) {
|
||||
LastArgStackLoc = (!LastArgStackLoc) ? (16) : (LastArgStackLoc+4);
|
||||
// Create the frame index only once. SPOffset here can be anything
|
||||
// Create the frame index only once. SPOffset here can be anything
|
||||
// (this will be fixed on processFunctionBeforeFrameFinalized)
|
||||
if (MipsFI->getGPStackOffset() == -1) {
|
||||
FI = MFI->CreateFixedObject(4, 0, true);
|
||||
|
@ -946,10 +946,10 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
MachinePointerInfo::getFixedStack(FI),
|
||||
false, false, 0);
|
||||
Chain = GPLoad.getValue(1);
|
||||
Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, MVT::i32),
|
||||
Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, MVT::i32),
|
||||
GPLoad, SDValue(0,0));
|
||||
InFlag = Chain.getValue(1);
|
||||
}
|
||||
}
|
||||
|
||||
// Create the CALLSEQ_END node.
|
||||
Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
|
||||
|
@@ -993,7 +993,7 @@ MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// LowerFormalArguments - transform physical registers into virtual registers
/// and generate load operations for arguments places on the stack.
SDValue
MipsTargetLowering::LowerFormalArguments(SDValue Chain,
@@ -1023,7 +1023,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
ArgLocs, *DAG.getContext());

if (Subtarget->isABI_O32())
CCInfo.AnalyzeFormalArguments(Ins,
isVarArg ? CC_MipsO32_VarArgs : CC_MipsO32);
else
CCInfo.AnalyzeFormalArguments(Ins, CC_Mips);
@@ -1042,22 +1042,22 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
TargetRegisterClass *RC = 0;

if (RegVT == MVT::i32)
RC = Mips::CPURegsRegisterClass;
else if (RegVT == MVT::f32)
RC = Mips::FGR32RegisterClass;
else if (RegVT == MVT::f64) {
if (!Subtarget->isSingleFloat())
RC = Mips::AFGR64RegisterClass;
} else
llvm_unreachable("RegVT not supported by FormalArguments Lowering");

// Transform the arguments stored on
// physical registers into virtual ones
unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegEnd, RC);
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);

// If this is an 8 or 16-bit value, it has been passed promoted
// to 32 bits. Insert an assert[sz]ext to capture this, then
// truncate to the right size.
if (VA.getLocInfo() != CCValAssign::Full) {
unsigned Opcode = 0;
@@ -1066,21 +1066,21 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
else if (VA.getLocInfo() == CCValAssign::ZExt)
Opcode = ISD::AssertZext;
if (Opcode)
ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue,
DAG.getValueType(VA.getValVT()));
ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
}

// Handle O32 ABI cases: i32->f32 and (i32,i32)->f64
if (Subtarget->isABI_O32()) {
if (RegVT == MVT::i32 && VA.getValVT() == MVT::f32)
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
ArgValue = DAG.getNode(ISD::BITCAST, dl, MVT::f32, ArgValue);
if (RegVT == MVT::i32 && VA.getValVT() == MVT::f64) {
unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(),
VA.getLocReg()+1, RC);
SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT);
SDValue Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
SDValue Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue2);
SDValue Hi = DAG.getNode(ISD::BITCAST, dl, MVT::f32, ArgValue);
SDValue Lo = DAG.getNode(ISD::BITCAST, dl, MVT::f32, ArgValue2);
ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::f64, Lo, Hi);
}
}
@@ -1093,13 +1093,13 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,

// The last argument is not a register anymore
ArgRegEnd = 0;

// The stack pointer offset is relative to the caller stack frame.
// Since the real stack size is unknown here, a negative SPOffset
// is used so there's a way to adjust these offsets when the stack
// size get known (on EliminateFrameIndex). A dummy SPOffset is
// used instead of a direct negative address (which is recorded to
// be used on emitPrologue) to avoid mis-calc of the first stack
// offset on PEI::calculateFrameObjectOffsets.
// Arguments are always 32-bit.
unsigned ArgSize = VA.getLocVT().getSizeInBits()/8;
@@ -1130,11 +1130,11 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,

// To meet ABI, when VARARGS are passed on registers, the registers
// must have their values written to the caller stack frame. If the last
// argument was placed in the stack, there's no need to save any register.
if ((isVarArg) && (Subtarget->isABI_O32() && ArgRegEnd)) {
if (StackPtr.getNode() == 0)
StackPtr = DAG.getRegister(StackReg, getPointerTy());

// The last register argument that must be saved is Mips::A3
TargetRegisterClass *RC = Mips::CPURegsRegisterClass;
unsigned StackLoc = ArgLocs.size()-1;
@@ -1157,7 +1157,7 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
}
}

// All stores are grouped in one node to allow the matching between
// the size of Ins and InVals. This only happens when on varg functions
if (!OutChains.empty()) {
OutChains.push_back(Chain);
@@ -1190,7 +1190,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// Analize return values.
CCInfo.AnalyzeReturn(Outs, RetCC_Mips);

// If this is the first return lowered for this function, add
// the regs to the liveout set for the function.
if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
for (unsigned i = 0; i != RVLocs.size(); ++i)
@@ -1205,7 +1205,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
CCValAssign &VA = RVLocs[i];
assert(VA.isRegLoc() && "Can only return in registers!");

Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
OutVals[i], Flag);

// guarantee that all emitted copies are
@@ -1222,7 +1222,7 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
unsigned Reg = MipsFI->getSRetReturnReg();

if (!Reg)
llvm_unreachable("sret virtual register not created in the entry block");
SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());

@@ -1232,10 +1232,10 @@ MipsTargetLowering::LowerReturn(SDValue Chain,

// Return on Mips is always a "jr $ra"
if (Flag.getNode())
return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
Chain, DAG.getRegister(Mips::RA, MVT::i32), Flag);
else // Return Void
return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
Chain, DAG.getRegister(Mips::RA, MVT::i32));
}

@@ -1246,21 +1246,21 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
MipsTargetLowering::ConstraintType MipsTargetLowering::
getConstraintType(const std::string &Constraint) const
{
// Mips specific constrainy
// GCC config/mips/constraints.md
//
// 'd' : An address register. Equivalent to r
//       unless generating MIPS16 code.
// 'y' : Equivalent to r; retained for
//       backwards compatibility.
// 'f' : Floating Point registers.
if (Constraint.size() == 1) {
switch (Constraint[0]) {
default : break;
case 'd':
case 'y':
case 'f':
return C_RegisterClass;
break;
@@ -1287,8 +1287,8 @@ MipsTargetLowering::getSingleConstraintMatchWeight(
default:
weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
break;
case 'd':
case 'y':
if (type->isIntegerTy())
weight = CW_Register;
break;
@@ -1313,7 +1313,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
case 'f':
if (VT == MVT::f32)
return std::make_pair(0U, Mips::FGR32RegisterClass);
if (VT == MVT::f64)
if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
return std::make_pair(0U, Mips::AFGR64RegisterClass);
}
@@ -1331,15 +1331,15 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
if (Constraint.size() != 1)
return std::vector<unsigned>();

switch (Constraint[0]) {
default : break;
case 'r':
// GCC Mips Constraint Letters
case 'd':
case 'y':
return make_vector<unsigned>(Mips::T0, Mips::T1, Mips::T2, Mips::T3,
Mips::T4, Mips::T5, Mips::T6, Mips::T7, Mips::S0, Mips::S1,
Mips::S2, Mips::S3, Mips::S4, Mips::S5, Mips::S6, Mips::S7,
Mips::T8, 0);

case 'f':
@@ -1351,15 +1351,15 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
Mips::F25, Mips::F26, Mips::F27, Mips::F28, Mips::F29,
Mips::F30, Mips::F31, 0);
else
return make_vector<unsigned>(Mips::F2, Mips::F4, Mips::F6, Mips::F8,
Mips::F10, Mips::F20, Mips::F22, Mips::F24, Mips::F26,
Mips::F28, Mips::F30, 0);
}

if (VT == MVT::f64)
if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
return make_vector<unsigned>(Mips::D1, Mips::D2, Mips::D3, Mips::D4,
Mips::D5, Mips::D10, Mips::D11, Mips::D12, Mips::D13,
Mips::D14, Mips::D15, 0);
}
return std::vector<unsigned>();
@@ -76,7 +76,7 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
// On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
// arguments are at least 4/8 bytes aligned.
setMinStackArgumentAlignment(TM.getSubtarget<PPCSubtarget>().isPPC64() ? 8:4);

// Set up the register classes.
addRegisterClass(MVT::i32, PPC::GPRCRegisterClass);
addRegisterClass(MVT::f32, PPC::F4RCRegisterClass);
@@ -178,10 +178,10 @@ PPCTargetLowering::PPCTargetLowering(PPCTargetMachine &TM)
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i64, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::f64, Expand);
setOperationAction(ISD::BITCAST, MVT::f32, Expand);
setOperationAction(ISD::BITCAST, MVT::i32, Expand);
setOperationAction(ISD::BITCAST, MVT::i64, Expand);
setOperationAction(ISD::BITCAST, MVT::f64, Expand);

// We cannot sextinreg(i1). Expand to shifts.
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
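As with the MBlaze hunk earlier, the hunk above only renames the opcode in the PPC legality declarations; the scalar bitcasts stay Expand. For orientation, roughly what Expand means for a scalar BITCAST such as f64 <-> i64 here is a bit-preserving round trip through memory. The sketch below is illustrative only, not the legalizer's actual implementation; the helper name is hypothetical and the store/load forms follow the ones used elsewhere in this diff:

// Reinterpret Val's bits as DestVT by storing with one type and reloading
// with the other, via a stack temporary.
SDValue ExpandBitcastViaStack(SelectionDAG &DAG, DebugLoc dl,
                              SDValue Val, EVT DestVT) {
  SDValue Slot = DAG.CreateStackTemporary(Val.getValueType());
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Val, Slot,
                               MachinePointerInfo(), false, false, 0);
  return DAG.getLoad(DestVT, dl, Store, Slot, MachinePointerInfo(),
                     false, false, 0);
}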
@@ -549,7 +549,7 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGL* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
bool isUnary) {
if (!isUnary)
return isVMerge(N, UnitSize, 8, 24);
@@ -558,7 +558,7 @@ bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VRGH* instruction with the specified unit size (1,2 or 4 bytes).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
bool isUnary) {
if (!isUnary)
return isVMerge(N, UnitSize, 0, 16);
@@ -573,7 +573,7 @@ int PPC::isVSLDOIShuffleMask(SDNode *N, bool isUnary) {
"PPC only supports shuffles by bytes!");

ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

// Find the first non-undef value in the shuffle mask.
unsigned i;
for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
@@ -611,7 +611,7 @@ bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
// This is a splat operation if each element of the permute is the same, and
// if the value doesn't reference the second vector.
unsigned ElementBase = N->getMaskElt(0);

// FIXME: Handle UNDEF elements too!
if (ElementBase >= 16)
return false;
@@ -639,7 +639,7 @@ bool PPC::isAllNegativeZeroVector(SDNode *N) {
APInt APVal, APUndef;
unsigned BitSize;
bool HasAnyUndefs;

if (BV->isConstantSplat(APVal, APUndef, BitSize, HasAnyUndefs, 32, true))
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
return CFP->getValueAPF().isNegZero();
@@ -1104,10 +1104,10 @@ static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
unsigned &LoOpFlags, const GlobalValue *GV = 0) {
HiOpFlags = PPCII::MO_HA16;
LoOpFlags = PPCII::MO_LO16;

// Don't use the pic base if not in PIC relocation model.  Or if we are on a
// non-darwin platform.  We don't support PIC on other platforms yet.
bool isPIC = TM.getRelocationModel() == Reloc::PIC_ &&
TM.getSubtarget<PPCSubtarget>().isDarwin();
if (isPIC) {
HiOpFlags |= PPCII::MO_PIC_FLAG;
@@ -1119,13 +1119,13 @@ static bool GetLabelAccessInfo(const TargetMachine &TM, unsigned &HiOpFlags,
if (GV && TM.getSubtarget<PPCSubtarget>().hasLazyResolverStub(GV, TM)) {
HiOpFlags |= PPCII::MO_NLP_FLAG;
LoOpFlags |= PPCII::MO_NLP_FLAG;

if (GV->hasHiddenVisibility()) {
HiOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
LoOpFlags |= PPCII::MO_NLP_HIDDEN_FLAG;
}
}

return isPIC;
}

@@ -1137,12 +1137,12 @@ static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,

SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);

// With PIC, the first instruction is actually "GR+hi(&G)".
if (isPIC)
Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);

// Generate non-pic code that has direct accesses to the constant pool.
// The address of the global is just (hi(&g)+lo(&g)).
return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
@@ -1166,7 +1166,7 @@ SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
EVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
@@ -1180,7 +1180,7 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
DebugLoc DL = Op.getDebugLoc();

const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();

unsigned MOHiFlag, MOLoFlag;
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
SDValue TgtBAHi = DAG.getBlockAddress(BA, PtrVT, /*isTarget=*/true, MOHiFlag);
@@ -1210,7 +1210,7 @@ SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
SDValue GALo =
DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);

SDValue Ptr = LowerLabelRef(GAHi, GALo, isPIC, DAG);

// If the global reference is actually to a non-lazy-pointer, we have to do an
@@ -1429,7 +1429,7 @@ static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
PPC::R7, PPC::R8, PPC::R9, PPC::R10,
};
const unsigned NumArgRegs = array_lengthof(ArgRegs);

unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);

// Skip one register if the first unallocated register has an even register
@@ -1439,7 +1439,7 @@ static bool CC_PPC_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
if (RegNum != NumArgRegs && RegNum % 2 == 1) {
State.AllocateReg(ArgRegs[RegNum]);
}

// Always return false here, as this function only makes sure that the first
// unallocated register has an odd register number and does not actually
// allocate a register for the current argument.
@@ -1457,7 +1457,7 @@ static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
};

const unsigned NumArgRegs = array_lengthof(ArgRegs);

unsigned RegNum = State.getFirstUnallocated(ArgRegs, NumArgRegs);

// If there is only one Floating-point register left we need to put both f64
@@ -1465,7 +1465,7 @@ static bool CC_PPC_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
if (RegNum != NumArgRegs && ArgRegs[RegNum] == PPC::F8) {
State.AllocateReg(ArgRegs[RegNum]);
}

// Always return false here, as this function only makes sure that the two f64
// values a ppc_fp128 value is split into are both passed in registers or both
// passed on the stack and does not actually allocate a register for the
@ -1550,7 +1550,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
|
|||
// Specifications:
|
||||
// System V Application Binary Interface PowerPC Processor Supplement
|
||||
// AltiVec Technology Programming Interface Manual
|
||||
|
||||
|
||||
MachineFunction &MF = DAG.getMachineFunction();
|
||||
MachineFrameInfo *MFI = MF.getFrameInfo();
|
||||
PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
|
||||
|
@ -1569,15 +1569,15 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
|
|||
CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
|
||||
|
||||
CCInfo.AnalyzeFormalArguments(Ins, CC_PPC_SVR4);
|
||||
|
||||
|
||||
for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
|
||||
CCValAssign &VA = ArgLocs[i];
|
||||
|
||||
|
||||
// Arguments stored in registers.
|
||||
if (VA.isRegLoc()) {
|
||||
TargetRegisterClass *RC;
|
||||
EVT ValVT = VA.getValVT();
|
||||
|
||||
|
||||
switch (ValVT.getSimpleVT().SimpleTy) {
|
||||
default:
|
||||
llvm_unreachable("ValVT not supported by formal arguments Lowering");
|
||||
|
@ -1597,7 +1597,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
|
|||
RC = PPC::VRRCRegisterClass;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
// Transform the arguments stored in physical registers into virtual ones.
|
||||
unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
|
||||
SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, ValVT);
|
||||
|
@ -1633,7 +1633,7 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
|
|||
|
||||
// Area that is at least reserved in the caller of this function.
|
||||
unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
|
||||
|
||||
|
||||
// Set the size that is at least reserved in caller of this function. Tail
|
||||
// call optimized function's reserved stack space needs to be aligned so that
|
||||
// taking the difference between two stack areas will result in an aligned
|
||||
|
@ -1643,16 +1643,16 @@ PPCTargetLowering::LowerFormalArguments_SVR4(
|
|||
MinReservedArea =
|
||||
std::max(MinReservedArea,
|
||||
PPCFrameInfo::getMinCallFrameSize(false, false));
|
||||
|
||||
|
||||
unsigned TargetAlign = DAG.getMachineFunction().getTarget().getFrameInfo()->
|
||||
getStackAlignment();
|
||||
unsigned AlignMask = TargetAlign-1;
|
||||
MinReservedArea = (MinReservedArea + AlignMask) & ~AlignMask;
|
||||
|
||||
|
||||
FI->setMinReservedArea(MinReservedArea);
|
||||
|
||||
SmallVector<SDValue, 8> MemOps;
|
||||
|
||||
|
||||
// If the function takes variable number of arguments, make a frame index for
|
||||
// the start of the first vararg value... for expansion of llvm.va_start.
|
||||
if (isVarArg) {
|
||||
|
@ -1883,9 +1883,9 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
|
|||
MemOps.push_back(Store);
|
||||
++GPR_idx;
|
||||
}
|
||||
|
||||
|
||||
ArgOffset += PtrByteSize;
|
||||
|
||||
|
||||
continue;
|
||||
}
|
||||
for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
|
||||
|
@ -2064,7 +2064,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
|
|||
// result of va_next.
|
||||
for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
|
||||
unsigned VReg;
|
||||
|
||||
|
||||
if (isPPC64)
|
||||
VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
|
||||
else
|
||||
|
@ -2331,7 +2331,7 @@ SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(SelectionDAG & DAG,
|
|||
LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo(),
|
||||
false, false, 0);
|
||||
Chain = SDValue(LROpOut.getNode(), 1);
|
||||
|
||||
|
||||
// When using the 32/64-bit SVR4 ABI there is no need to load the FP stack
|
||||
// slot as the FP is never overwritten.
|
||||
if (isDarwinABI) {
|
||||
|
@ -2421,7 +2421,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
|
|||
SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
|
||||
SmallVector<SDValue, 8> &Ops, std::vector<EVT> &NodeTys,
|
||||
const PPCSubtarget &PPCSubTarget) {
|
||||
|
||||
|
||||
bool isPPC64 = PPCSubTarget.isPPC64();
|
||||
bool isSVR4ABI = PPCSubTarget.isSVR4ABI();
|
||||
|
||||
|
@ -2437,7 +2437,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
|
|||
Callee = SDValue(Dest, 0);
|
||||
needIndirectCall = false;
|
||||
}
|
||||
|
||||
|
||||
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
|
||||
// XXX Work around for http://llvm.org/bugs/show_bug.cgi?id=5201
|
||||
// Use indirect calls for ALL functions calls in JIT mode, since the
|
||||
|
@ -2453,7 +2453,7 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
|
|||
// automatically synthesizes these stubs.
|
||||
OpFlags = PPCII::MO_DARWIN_STUB;
|
||||
}
|
||||
|
||||
|
||||
// If the callee is a GlobalAddress/ExternalSymbol node (quite common,
|
||||
// every direct call is) turn it into a TargetGlobalAddress /
|
||||
// TargetExternalSymbol node so that legalize doesn't hack it.
|
||||
|
@ -2461,12 +2461,12 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
|
|||
Callee.getValueType(),
|
||||
0, OpFlags);
|
||||
needIndirectCall = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
|
||||
unsigned char OpFlags = 0;
|
||||
|
||||
|
||||
if (DAG.getTarget().getRelocationModel() != Reloc::Static &&
|
||||
PPCSubTarget.getDarwinVers() < 9) {
|
||||
// PC-relative references to external symbols should go through $stub,
|
||||
|
@ -2474,12 +2474,12 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &Callee, SDValue &InFlag,
|
|||
// automatically synthesizes these stubs.
|
||||
OpFlags = PPCII::MO_DARWIN_STUB;
|
||||
}
|
||||
|
||||
|
||||
Callee = DAG.getTargetExternalSymbol(S->getSymbol(), Callee.getValueType(),
|
||||
OpFlags);
|
||||
needIndirectCall = false;
|
||||
}
|
||||
|
||||
|
||||
if (needIndirectCall) {
|
||||
// Otherwise, this is an indirect call. We have to use a MTCTR/BCTRL pair
|
||||
// to do the call, we can't use PPCISD::CALL.
|
||||
|
@ -2750,7 +2750,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
// in this function's (MF) stack pointer stack slot 0(SP).
|
||||
if (GuaranteedTailCallOpt && CallConv==CallingConv::Fast)
|
||||
MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
|
||||
|
||||
|
||||
// Count how many bytes are to be pushed on the stack, including the linkage
|
||||
// area, parameter list area and the part of the local variable space which
|
||||
// contains copies of aggregates which are passed by value.
|
||||
|
@ -2768,12 +2768,12 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
// Fixed vector arguments go into registers as long as registers are
|
||||
// available. Variable vector arguments always go into memory.
|
||||
unsigned NumArgs = Outs.size();
|
||||
|
||||
|
||||
for (unsigned i = 0; i != NumArgs; ++i) {
|
||||
MVT ArgVT = Outs[i].VT;
|
||||
ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
|
||||
bool Result;
|
||||
|
||||
|
||||
if (Outs[i].IsFixed) {
|
||||
Result = CC_PPC_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
|
||||
CCInfo);
|
||||
|
@ -2781,7 +2781,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
Result = CC_PPC_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
|
||||
ArgFlags, CCInfo);
|
||||
}
|
||||
|
||||
|
||||
if (Result) {
|
||||
#ifndef NDEBUG
|
||||
errs() << "Call operand #" << i << " has unhandled type "
|
||||
|
@ -2794,7 +2794,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
// All arguments are treated the same.
|
||||
CCInfo.AnalyzeCallOperands(Outs, CC_PPC_SVR4);
|
||||
}
|
||||
|
||||
|
||||
// Assign locations to all of the outgoing aggregate by value arguments.
|
||||
SmallVector<CCValAssign, 16> ByValArgLocs;
|
||||
CCState CCByValInfo(CallConv, isVarArg, getTargetMachine(), ByValArgLocs,
|
||||
|
@ -2809,7 +2809,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
// space variable where copies of aggregates which are passed by value are
|
||||
// stored.
|
||||
unsigned NumBytes = CCByValInfo.getNextStackOffset();
|
||||
|
||||
|
||||
// Calculate by how many bytes the stack has to be adjusted in case of tail
|
||||
// call optimization.
|
||||
int SPDiff = CalculateTailCallSPDiff(DAG, isTailCall, NumBytes);
|
||||
|
@ -2829,7 +2829,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
// arguments that may not fit in the registers available for argument
|
||||
// passing.
|
||||
SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
|
||||
|
||||
|
||||
SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
|
||||
SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
|
||||
SmallVector<SDValue, 8> MemOpChains;
|
||||
|
@ -2841,7 +2841,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
CCValAssign &VA = ArgLocs[i];
|
||||
SDValue Arg = OutVals[i];
|
||||
ISD::ArgFlagsTy Flags = Outs[i].Flags;
|
||||
|
||||
|
||||
if (Flags.isByVal()) {
|
||||
// Argument is an aggregate which is passed by value, thus we need to
|
||||
// create a copy of it in the local variable space of the current stack
|
||||
|
@ -2850,33 +2850,33 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
assert((j < ByValArgLocs.size()) && "Index out of bounds!");
|
||||
CCValAssign &ByValVA = ByValArgLocs[j++];
|
||||
assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
|
||||
|
||||
|
||||
// Memory reserved in the local variable space of the callers stack frame.
|
||||
unsigned LocMemOffset = ByValVA.getLocMemOffset();
|
||||
|
||||
|
||||
SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
|
||||
PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
|
||||
|
||||
|
||||
// Create a copy of the argument in the local area of the current
|
||||
// stack frame.
|
||||
SDValue MemcpyCall =
|
||||
CreateCopyOfByValArgument(Arg, PtrOff,
|
||||
CallSeqStart.getNode()->getOperand(0),
|
||||
Flags, DAG, dl);
|
||||
|
||||
|
||||
// This must go outside the CALLSEQ_START..END.
|
||||
SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall,
|
||||
CallSeqStart.getNode()->getOperand(1));
|
||||
DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
|
||||
NewCallSeqStart.getNode());
|
||||
Chain = CallSeqStart = NewCallSeqStart;
|
||||
|
||||
|
||||
// Pass the address of the aggregate copy on the stack either in a
|
||||
// physical register or in the parameter list area of the current stack
|
||||
// frame to the callee.
|
||||
Arg = PtrOff;
|
||||
}
|
||||
|
||||
|
||||
if (VA.isRegLoc()) {
|
||||
// Put argument in a physical register.
|
||||
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
|
||||
|
@ -2899,11 +2899,11 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (!MemOpChains.empty())
|
||||
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
|
||||
&MemOpChains[0], MemOpChains.size());
|
||||
|
||||
|
||||
// Build a sequence of copy-to-reg nodes chained together with token chain
|
||||
// and flag operands which copy the outgoing args into the appropriate regs.
|
||||
SDValue InFlag;
|
||||
|
@ -2912,7 +2912,7 @@ PPCTargetLowering::LowerCall_SVR4(SDValue Chain, SDValue Callee,
|
|||
RegsToPass[i].second, InFlag);
|
||||
InFlag = Chain.getValue(1);
|
||||
}
|
||||
|
||||
|
||||
// Set CR6 to true if this is a vararg call.
|
||||
if (isVarArg) {
|
||||
SDValue SetCR(DAG.getMachineNode(PPC::CRSET, dl, MVT::i32), 0);
|
||||
|
@ -3187,7 +3187,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
|
|||
MachinePointerInfo(), false, false, 0);
|
||||
MemOpChains.push_back(Store);
|
||||
if (VR_idx != NumVRs) {
|
||||
SDValue Load = DAG.getLoad(MVT::v4f32, dl, Store, PtrOff,
|
||||
MachinePointerInfo(),
|
||||
false, false, 0);
|
||||
MemOpChains.push_back(Load.getValue(1));
|
||||
|
@ -3272,7 +3272,7 @@ PPCTargetLowering::LowerCall_Darwin(SDValue Chain, SDValue Callee,
|
|||
// On Darwin, R12 must contain the address of an indirect callee. This does
|
||||
// not mean the MTCTR instruction must use R12; it's easier to model this as
|
||||
// an extra parameter, so do that.
|
||||
if (!isTailCall &&
|
||||
!dyn_cast<GlobalAddressSDNode>(Callee) &&
|
||||
!dyn_cast<ExternalSymbolSDNode>(Callee) &&
|
||||
!isBLACompatibleAddress(Callee, DAG))
|
||||
|
@ -3523,7 +3523,7 @@ SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
|
|||
default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
|
||||
case MVT::i32:
|
||||
Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIWZ :
|
||||
PPCISD::FCTIDZ,
|
||||
dl, MVT::f64, Src);
|
||||
break;
|
||||
case MVT::i64:
|
||||
|
@ -3555,8 +3555,7 @@ SDValue PPCTargetLowering::LowerSINT_TO_FP(SDValue Op,
|
|||
return SDValue();
|
||||
|
||||
if (Op.getOperand(0).getValueType() == MVT::i64) {
|
||||
SDValue Bits = DAG.getNode(ISD::BIT_CONVERT, dl,
|
||||
MVT::f64, Op.getOperand(0));
|
||||
SDValue Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op.getOperand(0));
|
||||
SDValue FP = DAG.getNode(PPCISD::FCFID, dl, MVT::f64, Bits);
|
||||
if (Op.getValueType() == MVT::f32)
|
||||
FP = DAG.getNode(ISD::FP_ROUND, dl,
|
||||
|
@ -3777,7 +3776,7 @@ static SDValue BuildSplatI(int Val, unsigned SplatSize, EVT VT,
|
|||
Ops.assign(CanonicalVT.getVectorNumElements(), Elt);
|
||||
SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, dl, CanonicalVT,
|
||||
&Ops[0], Ops.size());
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, ReqVT, Res);
|
||||
return DAG.getNode(ISD::BITCAST, dl, ReqVT, Res);
|
||||
}
|
||||
|
||||
/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
|
||||
|
@ -3806,14 +3805,14 @@ static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
|
|||
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt,
|
||||
EVT VT, SelectionDAG &DAG, DebugLoc dl) {
|
||||
// Force LHS/RHS to be the right type.
|
||||
LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, LHS);
|
||||
RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, RHS);
|
||||
LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
|
||||
RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
|
||||
|
||||
int Ops[16];
|
||||
for (unsigned i = 0; i != 16; ++i)
|
||||
Ops[i] = i + Amt;
|
||||
SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, T);
|
||||
}
|
||||
|
||||
// If this is a case we can't handle, return null and let the default
|
||||
|
@ -3847,7 +3846,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
|
|||
if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
|
||||
SDValue Z = DAG.getConstant(0, MVT::i32);
|
||||
Z = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Z, Z, Z, Z);
|
||||
Op = DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Z);
|
||||
Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
|
||||
}
|
||||
return Op;
|
||||
}
|
||||
|
@ -3866,7 +3865,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
|
|||
if (SextVal >= -32 && SextVal <= 30 && (SextVal & 1) == 0) {
|
||||
SDValue Res = BuildSplatI(SextVal >> 1, SplatSize, MVT::Other, DAG, dl);
|
||||
Res = DAG.getNode(ISD::ADD, dl, Res.getValueType(), Res, Res);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
|
||||
}
|
||||
|
||||
// If this is 0x8000_0000 x 4, turn into vspltisw + vslw. If it is
|
||||
|
@ -3882,7 +3881,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
|
|||
|
||||
// xor by OnesV to invert it.
|
||||
Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
|
||||
}
|
||||
|
||||
// Check to see if this is a wide variety of vsplti*, binop self cases.
|
||||
|
@ -3908,7 +3907,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
|
|||
Intrinsic::ppc_altivec_vslw
|
||||
};
|
||||
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
|
||||
}
|
||||
|
||||
// vsplti + srl self.
|
||||
|
@ -3919,7 +3918,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
|
|||
Intrinsic::ppc_altivec_vsrw
|
||||
};
|
||||
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
|
||||
}
|
||||
|
||||
// vsplti + sra self.
|
||||
|
@ -3930,7 +3929,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
|
|||
Intrinsic::ppc_altivec_vsraw
|
||||
};
|
||||
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
|
||||
}
|
||||
|
||||
// vsplti + rol self.
|
||||
|
@ -3942,7 +3941,7 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
|
|||
Intrinsic::ppc_altivec_vrlw
|
||||
};
|
||||
Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Res);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
|
||||
}
|
||||
|
||||
// t = vsplti c, result = vsldoi t, t, 1
|
||||
|
@ -3969,14 +3968,14 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
|
|||
SDValue LHS = BuildSplatI(SextVal-16, SplatSize, MVT::Other, DAG, dl);
|
||||
SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
|
||||
LHS = DAG.getNode(ISD::SUB, dl, LHS.getValueType(), LHS, RHS);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
|
||||
}
|
||||
// Odd, in range [-31,-17]: (vsplti C)+(vsplti -16).
|
||||
if (SextVal >= -31 && SextVal <= 0) {
|
||||
SDValue LHS = BuildSplatI(SextVal+16, SplatSize, MVT::Other, DAG, dl);
|
||||
SDValue RHS = BuildSplatI(-16, SplatSize, MVT::Other, DAG, dl);
|
||||
LHS = DAG.getNode(ISD::ADD, dl, LHS.getValueType(), LHS, RHS);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), LHS);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), LHS);
|
||||
}
|
||||
|
||||
return SDValue();
|
||||
|
@ -4053,10 +4052,10 @@ static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
|
|||
return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
|
||||
}
|
||||
EVT VT = OpLHS.getValueType();
|
||||
OpLHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpLHS);
|
||||
OpRHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OpRHS);
|
||||
OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
|
||||
OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
|
||||
SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, VT, T);
|
||||
return DAG.getNode(ISD::BITCAST, dl, VT, T);
|
||||
}
|
||||
|
||||
/// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE. If this
|
||||
|
@ -4109,7 +4108,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
|
|||
// perfect shuffle table to emit an optimal matching sequence.
|
||||
SmallVector<int, 16> PermMask;
|
||||
SVOp->getMask(PermMask);
|
||||
|
||||
|
||||
unsigned PFIndexes[4];
|
||||
bool isFourElementShuffle = true;
|
||||
for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
|
||||
|
@ -4244,7 +4243,7 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
|
|||
SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
|
||||
Op.getOperand(1), Op.getOperand(2),
|
||||
DAG.getConstant(CompareOpc, MVT::i32));
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Tmp);
|
||||
return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
|
||||
}
|
||||
|
||||
// Create the PPCISD altivec 'dot' comparison node.
|
||||
|
@ -4327,9 +4326,9 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
|
|||
BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
|
||||
|
||||
// Shrinkify inputs to v8i16.
|
||||
LHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, LHS);
|
||||
RHS = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHS);
|
||||
RHSSwap = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, RHSSwap);
|
||||
LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
|
||||
RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
|
||||
RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
|
||||
|
||||
// Low parts multiplied together, generating 32-bit results (we ignore the
|
||||
// top parts).
|
||||
|
@ -4355,12 +4354,12 @@ SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
|
|||
// Multiply the even 8-bit parts, producing 16-bit sums.
|
||||
SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
|
||||
LHS, RHS, DAG, dl, MVT::v8i16);
|
||||
EvenParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, EvenParts);
|
||||
EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
|
||||
|
||||
// Multiply the odd 8-bit parts, producing 16-bit sums.
|
||||
SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
|
||||
LHS, RHS, DAG, dl, MVT::v8i16);
|
||||
OddParts = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, OddParts);
|
||||
OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
|
||||
|
||||
// Merge the results together.
|
||||
int Ops[16];
|
||||
|
@ -5568,7 +5567,7 @@ SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
|
|||
if (Depth > 0) {
|
||||
SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
|
||||
SDValue Offset =
|
||||
|
||||
|
||||
DAG.getConstant(PPCFrameInfo::getReturnSaveOffset(isPPC64, isDarwinABI),
|
||||
isPPC64? MVT::i64 : MVT::i32);
|
||||
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
|
||||
|
|
|
@ -285,8 +285,8 @@ including having this work sanely.
|
|||
Fix Darwin FP-In-Integer Registers ABI
|
||||
|
||||
Darwin passes doubles in structures in integer registers, which is very very
|
||||
bad. Add something like a BIT_CONVERT to LLVM, then do an i-p transformation
|
||||
that percolates these things out of functions.
|
||||
bad. Add something like a BITCAST to LLVM, then do an i-p transformation that
|
||||
percolates these things out of functions.
|
||||
|
||||
Check out how horrible this is:
|
||||
http://gcc.gnu.org/ml/gcc/2005-10/msg01036.html
|
||||
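For context, here is a minimal illustration of the pattern the note above is describing. This example is not part of the patch, and the exact register assignment is only the typical case for the 32-bit Darwin PowerPC ABI:

  // A small aggregate with a double member. Under the Darwin PPC32 ABI the
  // struct is passed in the integer argument registers, so the 8 bytes of
  // 'x' typically arrive split across two GPRs (e.g. r3/r4 for the first
  // argument) and have to be moved back into an FPR before any FP arithmetic.
  struct Pt { double x; };

  double getX(struct Pt p) {
    return p.x + 1.0;   // forces a GPR -> FPR round trip at the call boundary
  }

The note's suggestion is that a first-class bitcast node plus an interprocedural pass could rewrite such parameters so the double travels in an FPR instead.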
|
|
|
@ -66,7 +66,7 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
|
|||
CCValAssign &VA = RVLocs[i];
|
||||
assert(VA.isRegLoc() && "Can only return in registers!");
|
||||
|
||||
Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
|
||||
OutVals[i], Flag);
|
||||
|
||||
// Guarantee that all emitted copies are stuck together with flags.
|
||||
|
@ -166,7 +166,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
|
|||
MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
|
||||
SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
|
||||
|
||||
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg);
|
||||
Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
|
||||
InVals.push_back(Arg);
|
||||
} else {
|
||||
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset,
|
||||
|
@ -219,7 +219,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
|
|||
|
||||
// If we want a double, do a bit convert.
|
||||
if (ObjectVT == MVT::f64)
|
||||
WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue);
|
||||
WholeValue = DAG.getNode(ISD::BITCAST, dl, MVT::f64, WholeValue);
|
||||
|
||||
InVals.push_back(WholeValue);
|
||||
}
|
||||
|
@ -383,7 +383,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
ValToStore = Val;
|
||||
} else {
|
||||
// Convert this to a FP value in an int reg.
|
||||
Val = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Val);
|
||||
Val = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Val);
|
||||
RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
|
||||
}
|
||||
break;
|
||||
|
@ -397,7 +397,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
// Break into top and bottom parts by storing to the stack and loading
|
||||
// out the parts as integers. Top part goes in a reg.
|
||||
SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
|
||||
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
|
||||
Val, StackPtr, MachinePointerInfo(),
|
||||
false, false, 0);
|
||||
// Sparc is big-endian, so the high part comes first.
|
||||
|
@ -450,7 +450,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
|
||||
SDValue PtrOff = DAG.getConstant(ArgOffset, MVT::i32);
|
||||
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
|
||||
MemOpChains.push_back(DAG.getStore(Chain, dl, ValToStore,
|
||||
PtrOff, MachinePointerInfo(),
|
||||
false, false, 0));
|
||||
}
|
||||
|
@ -612,8 +612,8 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
|
|||
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
|
||||
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
|
||||
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
|
||||
setOperationAction(ISD::BITCAST, MVT::f32, Expand);
|
||||
setOperationAction(ISD::BITCAST, MVT::i32, Expand);
|
||||
|
||||
// Sparc has no select or setcc: expand to SELECT_CC.
|
||||
setOperationAction(ISD::SELECT, MVT::i32, Expand);
|
||||
|
@ -758,7 +758,7 @@ static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
|
|||
}
|
||||
}
|
||||
|
||||
SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
|
||||
SelectionDAG &DAG) const {
|
||||
const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
|
||||
// FIXME there isn't really any debug info here
|
||||
|
@ -767,15 +767,15 @@ SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
|
|||
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA);
|
||||
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA);
|
||||
|
||||
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
|
||||
return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
|
||||
|
||||
|
||||
SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
|
||||
getPointerTy());
|
||||
SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
|
||||
SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
|
||||
GlobalBase, RelAddr);
|
||||
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
|
||||
AbsAddr, MachinePointerInfo(), false, false, 0);
|
||||
}
|
||||
|
||||
|
@ -788,15 +788,15 @@ SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
|
|||
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
|
||||
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP);
|
||||
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP);
|
||||
if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
|
||||
return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
|
||||
|
||||
SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, dl,
|
||||
getPointerTy());
|
||||
SDValue RelAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
|
||||
SDValue AbsAddr = DAG.getNode(ISD::ADD, dl, MVT::i32,
|
||||
GlobalBase, RelAddr);
|
||||
return DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
|
||||
AbsAddr, MachinePointerInfo(), false, false, 0);
|
||||
}
|
||||
|
||||
|
@ -805,13 +805,13 @@ static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
|
|||
// Convert the fp value to integer in an FP register.
|
||||
assert(Op.getValueType() == MVT::i32);
|
||||
Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
|
||||
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
|
||||
return DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
|
||||
}
|
||||
|
||||
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
|
||||
DebugLoc dl = Op.getDebugLoc();
|
||||
assert(Op.getOperand(0).getValueType() == MVT::i32);
|
||||
SDValue Tmp = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
|
||||
SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Op.getOperand(0));
|
||||
// Convert the int value to FP in an FP register.
|
||||
return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
|
||||
}
|
||||
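The two helpers above mirror each other: FTOI followed by BITCAST for fp-to-int, BITCAST followed by ITOF for int-to-fp. As a condensed usage sketch (assumed for illustration, not taken from this patch; the real LowerOperation switch handles many more opcodes such as globals, constant pools and VAARG), a target's LowerOperation hook would dispatch to them roughly like this:

  // Hypothetical, condensed dispatch; LowerFP_TO_SINT and LowerSINT_TO_FP are
  // the file-local helpers shown in the hunk above.
  SDValue SparcTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
    switch (Op.getOpcode()) {
    default: llvm_unreachable("Should not custom lower this!");
    case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
    case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
    }
  }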
|
@ -925,7 +925,7 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
|
|||
|
||||
// Bit-Convert the value to f64.
|
||||
SDValue Ops[2] = {
|
||||
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, V),
|
||||
DAG.getNode(ISD::BITCAST, dl, MVT::f64, V),
|
||||
V.getValue(1)
|
||||
};
|
||||
return DAG.getMergeValues(Ops, 2, dl);
|
||||
|
|
|
@ -147,8 +147,8 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
|
|||
setOperationAction(ISD::FREM, MVT::f64, Expand);
|
||||
|
||||
// We have only 64-bit bitconverts
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
|
||||
setOperationAction(ISD::BITCAST, MVT::f32, Expand);
|
||||
setOperationAction(ISD::BITCAST, MVT::i32, Expand);
|
||||
|
||||
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
|
||||
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
|
||||
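Marking BITCAST as Expand, as SystemZ does here for the 32-bit types, asks the legalizer to lower the node rather than match it; in practice that comes down to a store/reload through a stack temporary. The following helper is a minimal sketch of that expansion written against the same SelectionDAG calls used elsewhere in this patch; it is illustrative only and not part of the change:

  // Illustrative only: roughly what the legalizer does for an expanded BITCAST
  // when the source and destination types live in different register files.
  static SDValue ExpandBitcastViaStack(SDValue Op, SelectionDAG &DAG, DebugLoc dl) {
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    // Spill the source value into a slot large enough for both types...
    SDValue Slot = DAG.CreateStackTemporary(SrcVT, DstVT);
    SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Src, Slot,
                                 MachinePointerInfo(), false, false, 0);
    // ...and reload the same bytes with the destination type.
    return DAG.getLoad(DstVT, dl, Store, Slot, MachinePointerInfo(),
                       false, false, 0);
  }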
|
|
|
@ -36,7 +36,7 @@
|
|||
using namespace llvm;
|
||||
|
||||
namespace {
|
||||
|
||||
|
||||
class X86FastISel : public FastISel {
|
||||
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
|
||||
/// make the right decision when generating code for different targets.
|
||||
|
@ -46,7 +46,7 @@ class X86FastISel : public FastISel {
|
|||
///
|
||||
unsigned StackPtr;
|
||||
|
||||
/// X86ScalarSSEf32, X86ScalarSSEf64 - Select between SSE or x87
|
||||
/// floating point ops.
|
||||
/// When SSE is available, use it for f32 operations.
|
||||
/// When SSE2 is available, use it for f64 operations.
|
||||
|
@ -69,12 +69,12 @@ public:
|
|||
/// possible.
|
||||
virtual bool TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
|
||||
const LoadInst *LI);
|
||||
|
||||
|
||||
#include "X86GenFastISel.inc"
|
||||
|
||||
private:
|
||||
bool X86FastEmitCompare(const Value *LHS, const Value *RHS, EVT VT);
|
||||
|
||||
|
||||
bool X86FastEmitLoad(EVT VT, const X86AddressMode &AM, unsigned &RR);
|
||||
|
||||
bool X86FastEmitStore(EVT VT, const Value *Val,
|
||||
|
@ -84,12 +84,12 @@ private:
|
|||
|
||||
bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
|
||||
unsigned &ResultReg);
|
||||
|
||||
|
||||
bool X86SelectAddress(const Value *V, X86AddressMode &AM);
|
||||
bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
|
||||
|
||||
bool X86SelectLoad(const Instruction *I);
|
||||
|
||||
|
||||
bool X86SelectStore(const Instruction *I);
|
||||
|
||||
bool X86SelectRet(const Instruction *I);
|
||||
|
@ -105,7 +105,7 @@ private:
|
|||
bool X86SelectSelect(const Instruction *I);
|
||||
|
||||
bool X86SelectTrunc(const Instruction *I);
|
||||
|
||||
|
||||
bool X86SelectFPExt(const Instruction *I);
|
||||
bool X86SelectFPTrunc(const Instruction *I);
|
||||
|
||||
|
@ -134,7 +134,7 @@ private:
|
|||
|
||||
bool isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1 = false);
|
||||
};
|
||||
|
||||
|
||||
} // end anonymous namespace.
|
||||
|
||||
bool X86FastISel::isTypeLegal(const Type *Ty, MVT &VT, bool AllowI1) {
|
||||
|
@ -250,7 +250,7 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
|
|||
Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
|
||||
DL, TII.get(Opc)), AM).addReg(Val);
|
||||
return true;
|
||||
|
@ -261,7 +261,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
|
|||
// Handle 'null' like i32/i64 0.
|
||||
if (isa<ConstantPointerNull>(Val))
|
||||
Val = Constant::getNullValue(TD.getIntPtrType(Val->getContext()));
|
||||
|
||||
|
||||
// If this is a store of a simple constant, fold the constant into the store.
|
||||
if (const ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
|
||||
unsigned Opc = 0;
|
||||
|
@ -278,7 +278,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
|
|||
Opc = X86::MOV64mi32;
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
if (Opc) {
|
||||
addFullAddress(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
|
||||
DL, TII.get(Opc)), AM)
|
||||
|
@ -287,11 +287,11 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
|
|||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
unsigned ValReg = getRegForValue(Val);
|
||||
if (ValReg == 0)
|
||||
return false;
|
||||
|
||||
return X86FastEmitStore(VT, ValReg, AM);
|
||||
}
|
||||
|
||||
|
@ -303,7 +303,7 @@ bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
|
|||
unsigned &ResultReg) {
|
||||
unsigned RR = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc,
|
||||
Src, /*TODO: Kill=*/false);
|
||||
|
||||
|
||||
if (RR != 0) {
|
||||
ResultReg = RR;
|
||||
return true;
|
||||
|
@ -438,7 +438,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
|
|||
AM.Disp = (uint32_t)Disp;
|
||||
if (X86SelectAddress(U->getOperand(0), AM))
|
||||
return true;
|
||||
|
||||
|
||||
// If we couldn't merge the sub value into this addr mode, revert back to
|
||||
// our address and just match the value instead of completely failing.
|
||||
AM = SavedAM;
|
||||
|
@ -467,7 +467,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
|
|||
|
||||
// Okay, we've committed to selecting this global. Set up the basic address.
|
||||
AM.GV = GV;
|
||||
|
||||
|
||||
// Allow the subtarget to classify the global.
|
||||
unsigned char GVFlags = Subtarget->ClassifyGlobalReference(GV, TM);
|
||||
|
||||
|
@ -476,7 +476,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
|
|||
// FIXME: How do we know Base.Reg is free??
|
||||
AM.Base.Reg = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
|
||||
}
|
||||
|
||||
|
||||
// Unless the ABI requires an extra load, return a direct reference to
|
||||
// the global.
|
||||
if (!isGlobalStubReference(GVFlags)) {
|
||||
|
@ -489,7 +489,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
|
|||
AM.GVOpFlags = GVFlags;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
// Ok, we need to do a load from a stub. If we've already loaded from this
|
||||
// stub, reuse the loaded pointer, otherwise emit the load now.
|
||||
DenseMap<const Value*, unsigned>::iterator I = LocalValueMap.find(V);
|
||||
|
@ -511,14 +511,14 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
|
|||
if (TLI.getPointerTy() == MVT::i64) {
|
||||
Opc = X86::MOV64rm;
|
||||
RC = X86::GR64RegisterClass;
|
||||
|
||||
|
||||
if (Subtarget->isPICStyleRIPRel())
|
||||
StubAM.Base.Reg = X86::RIP;
|
||||
} else {
|
||||
Opc = X86::MOV32rm;
|
||||
RC = X86::GR32RegisterClass;
|
||||
}
|
||||
|
||||
|
||||
LoadReg = createResultReg(RC);
|
||||
MachineInstrBuilder LoadMI =
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(Opc), LoadReg);
|
||||
|
@ -530,7 +530,7 @@ bool X86FastISel::X86SelectAddress(const Value *V, X86AddressMode &AM) {
|
|||
// Prevent loading GV stub multiple times in same MBB.
|
||||
LocalValueMap[V] = LoadReg;
|
||||
}
|
||||
|
||||
|
||||
// Now construct the final address. Note that the Disp, Scale,
|
||||
// and Index values may already be set here.
|
||||
AM.Base.Reg = LoadReg;
|
||||
|
@ -604,7 +604,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
|
|||
|
||||
// Okay, we've committed to selecting this global. Set up the basic address.
|
||||
AM.GV = GV;
|
||||
|
||||
|
||||
// No ABI requires an extra load for anything other than DLLImport, which
|
||||
// we rejected above. Return a direct reference to the global.
|
||||
if (Subtarget->isPICStyleRIPRel()) {
|
||||
|
@ -617,7 +617,7 @@ bool X86FastISel::X86SelectCallAddress(const Value *V, X86AddressMode &AM) {
|
|||
} else if (Subtarget->isPICStyleGOT()) {
|
||||
AM.GVOpFlags = X86II::MO_GOTOFF;
|
||||
}
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -702,7 +702,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
|
|||
return false;
|
||||
|
||||
CCValAssign &VA = ValLocs[0];
|
||||
|
||||
|
||||
// Don't bother handling odd stuff for now.
|
||||
if (VA.getLocInfo() != CCValAssign::Full)
|
||||
return false;
|
||||
|
@ -792,11 +792,11 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
|
|||
EVT VT) {
|
||||
unsigned Op0Reg = getRegForValue(Op0);
|
||||
if (Op0Reg == 0) return false;
|
||||
|
||||
|
||||
// Handle 'null' like i32/i64 0.
|
||||
if (isa<ConstantPointerNull>(Op1))
|
||||
Op1 = Constant::getNullValue(TD.getIntPtrType(Op0->getContext()));
|
||||
|
||||
|
||||
// We have two options: compare with register or immediate. If the RHS of
|
||||
// the compare is an immediate that we can fold into this compare, use
|
||||
// CMPri, otherwise use CMPrr.
|
||||
|
@ -808,16 +808,16 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1,
|
|||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
unsigned CompareOpc = X86ChooseCmpOpcode(VT, Subtarget);
|
||||
if (CompareOpc == 0) return false;
|
||||
|
||||
|
||||
unsigned Op1Reg = getRegForValue(Op1);
|
||||
if (Op1Reg == 0) return false;
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CompareOpc))
|
||||
.addReg(Op0Reg)
|
||||
.addReg(Op1Reg);
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -835,13 +835,13 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
|
|||
case CmpInst::FCMP_OEQ: {
|
||||
if (!X86FastEmitCompare(CI->getOperand(0), CI->getOperand(1), VT))
|
||||
return false;
|
||||
|
||||
|
||||
unsigned EReg = createResultReg(&X86::GR8RegClass);
|
||||
unsigned NPReg = createResultReg(&X86::GR8RegClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(X86::SETEr), EReg);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
||||
TII.get(X86::SETNPr), NPReg);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
||||
TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
return true;
|
||||
|
@ -874,7 +874,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
|
|||
case CmpInst::FCMP_UGE: SwapArgs = true; SetCCOpc = X86::SETBEr; break;
|
||||
case CmpInst::FCMP_ULT: SwapArgs = false; SetCCOpc = X86::SETBr; break;
|
||||
case CmpInst::FCMP_ULE: SwapArgs = false; SetCCOpc = X86::SETBEr; break;
|
||||
|
||||
|
||||
case CmpInst::ICMP_EQ: SwapArgs = false; SetCCOpc = X86::SETEr; break;
|
||||
case CmpInst::ICMP_NE: SwapArgs = false; SetCCOpc = X86::SETNEr; break;
|
||||
case CmpInst::ICMP_UGT: SwapArgs = false; SetCCOpc = X86::SETAr; break;
|
||||
|
@ -896,7 +896,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {
|
|||
// Emit a compare of Op0/Op1.
|
||||
if (!X86FastEmitCompare(Op0, Op1, VT))
|
||||
return false;
|
||||
|
||||
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(SetCCOpc), ResultReg);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
return true;
|
||||
|
@ -961,7 +961,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
|
|||
case CmpInst::FCMP_UGE: SwapArgs = true; BranchOpc = X86::JBE_4; break;
|
||||
case CmpInst::FCMP_ULT: SwapArgs = false; BranchOpc = X86::JB_4; break;
|
||||
case CmpInst::FCMP_ULE: SwapArgs = false; BranchOpc = X86::JBE_4; break;
|
||||
|
||||
|
||||
case CmpInst::ICMP_EQ: SwapArgs = false; BranchOpc = X86::JE_4; break;
|
||||
case CmpInst::ICMP_NE: SwapArgs = false; BranchOpc = X86::JNE_4; break;
|
||||
case CmpInst::ICMP_UGT: SwapArgs = false; BranchOpc = X86::JA_4; break;
|
||||
|
@ -975,7 +975,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
|
|||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
const Value *Op0 = CI->getOperand(0), *Op1 = CI->getOperand(1);
|
||||
if (SwapArgs)
|
||||
std::swap(Op0, Op1);
|
||||
|
@ -983,7 +983,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
|
|||
// Emit a compare of the LHS and RHS, setting the flags.
|
||||
if (!X86FastEmitCompare(Op0, Op1, VT))
|
||||
return false;
|
||||
|
||||
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(BranchOpc))
|
||||
.addMBB(TrueMBB);
|
||||
|
||||
|
@ -1119,16 +1119,16 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
|
|||
|
||||
unsigned Op0Reg = getRegForValue(I->getOperand(0));
|
||||
if (Op0Reg == 0) return false;
|
||||
|
||||
|
||||
// Fold immediate in shl(x,3).
|
||||
if (const ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
|
||||
unsigned ResultReg = createResultReg(RC);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpImm),
|
||||
ResultReg).addReg(Op0Reg).addImm(CI->getZExtValue() & 0xff);
|
||||
UpdateValueMap(I, ResultReg);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
unsigned Op1Reg = getRegForValue(I->getOperand(1));
|
||||
if (Op1Reg == 0) return false;
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
|
||||
|
@ -1152,10 +1152,10 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
|
|||
MVT VT;
|
||||
if (!isTypeLegal(I->getType(), VT))
|
||||
return false;
|
||||
|
||||
|
||||
// We only use cmov here, if we don't have a cmov instruction bail.
|
||||
if (!Subtarget->hasCMov()) return false;
|
||||
|
||||
|
||||
unsigned Opc = 0;
|
||||
const TargetRegisterClass *RC = NULL;
|
||||
if (VT == MVT::i16) {
|
||||
|
@ -1168,7 +1168,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
|
|||
Opc = X86::CMOVE64rr;
|
||||
RC = &X86::GR64RegClass;
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
|
||||
unsigned Op0Reg = getRegForValue(I->getOperand(0));
|
||||
|
@ -1233,7 +1233,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
|
|||
return false;
|
||||
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
|
||||
EVT DstVT = TLI.getValueType(I->getType());
|
||||
|
||||
|
||||
// This code only handles truncation to byte right now.
|
||||
if (DstVT != MVT::i8 && DstVT != MVT::i1)
|
||||
// All other cases should be handled by the tblgen generated code.
|
||||
|
@ -1304,21 +1304,21 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
|
|||
// Grab the frame index.
|
||||
X86AddressMode AM;
|
||||
if (!X86SelectAddress(Slot, AM)) return false;
|
||||
|
||||
|
||||
if (!X86FastEmitStore(PtrTy, Op1, AM)) return false;
|
||||
|
||||
|
||||
return true;
|
||||
}
|
||||
case Intrinsic::objectsize: {
|
||||
ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1));
|
||||
const Type *Ty = I.getCalledFunction()->getReturnType();
|
||||
|
||||
|
||||
assert(CI && "Non-constant type in Intrinsic::objectsize?");
|
||||
|
||||
|
||||
MVT VT;
|
||||
if (!isTypeLegal(Ty, VT))
|
||||
return false;
|
||||
|
||||
|
||||
unsigned OpC = 0;
|
||||
if (VT == MVT::i32)
|
||||
OpC = X86::MOV32ri;
|
||||
|
@ -1326,7 +1326,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
|
|||
OpC = X86::MOV64ri;
|
||||
else
|
||||
return false;
|
||||
|
||||
|
||||
unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(OpC), ResultReg).
|
||||
addImm(CI->isZero() ? -1ULL : 0);
|
||||
|
@ -1398,7 +1398,7 @@ bool X86FastISel::X86VisitIntrinsicCall(const IntrinsicInst &I) {
|
|||
ResultReg = DestReg1+1;
|
||||
else
|
||||
ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
|
||||
|
||||
|
||||
unsigned Opc = X86::SETBr;
|
||||
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
|
||||
Opc = X86::SETOr;
|
||||
|
@ -1516,10 +1516,10 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
// Analyze operands of the call, assigning locations to each operand.
|
||||
SmallVector<CCValAssign, 16> ArgLocs;
|
||||
CCState CCInfo(CC, false, TM, ArgLocs, I->getParent()->getContext());
|
||||
|
||||
|
||||
// Allocate shadow area for Win64
|
||||
if (Subtarget->isTargetWin64()) {
CCInfo.AllocateStack(32, 8);
|
||||
}
|
||||
|
||||
CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags, CC_X86);
|
||||
|
@ -1539,7 +1539,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
CCValAssign &VA = ArgLocs[i];
|
||||
unsigned Arg = Args[VA.getValNo()];
|
||||
EVT ArgVT = ArgVTs[VA.getValNo()];
|
||||
|
||||
|
||||
// Promote the value if needed.
|
||||
switch (VA.getLocInfo()) {
|
||||
default: llvm_unreachable("Unknown loc info!");
|
||||
|
@ -1572,21 +1572,21 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
if (!Emitted)
|
||||
Emitted = X86FastEmitExtend(ISD::SIGN_EXTEND, VA.getLocVT(),
|
||||
Arg, ArgVT, Arg);
|
||||
|
||||
|
||||
assert(Emitted && "Failed to emit a aext!"); Emitted=Emitted;
|
||||
ArgVT = VA.getLocVT();
|
||||
break;
|
||||
}
|
||||
case CCValAssign::BCvt: {
|
||||
unsigned BC = FastEmit_r(ArgVT.getSimpleVT(), VA.getLocVT(),
|
||||
ISD::BIT_CONVERT, Arg, /*TODO: Kill=*/false);
|
||||
ISD::BITCAST, Arg, /*TODO: Kill=*/false);
|
||||
assert(BC != 0 && "Failed to emit a bitcast!");
|
||||
Arg = BC;
|
||||
ArgVT = VA.getLocVT();
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
if (VA.isRegLoc()) {
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
|
||||
VA.getLocReg()).addReg(Arg);
|
||||
|
@ -1597,7 +1597,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
AM.Base.Reg = StackPtr;
|
||||
AM.Disp = LocMemOffset;
|
||||
const Value *ArgVal = ArgVals[VA.getValNo()];
|
||||
|
||||
|
||||
// If this is a really simple value, emit this with the Value* version of
|
||||
// X86FastEmitStore. If it isn't simple, we don't want to do this, as it
|
||||
// can cause us to reevaluate the argument.
|
||||
|
@ -1609,13 +1609,13 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
}
|
||||
|
||||
// ELF / PIC requires GOT in the EBX register before function calls via PLT
|
||||
// GOT pointer.
|
||||
if (Subtarget->isPICStyleGOT()) {
|
||||
unsigned Base = getInstrInfo()->getGlobalBaseReg(FuncInfo.MF);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(TargetOpcode::COPY),
|
||||
X86::EBX).addReg(Base);
|
||||
}
|
||||
|
||||
|
||||
// Issue the call.
|
||||
MachineInstrBuilder MIB;
|
||||
if (CalleeOp) {
|
||||
|
@ -1629,7 +1629,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
CallOpc = X86::CALL32r;
|
||||
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
|
||||
.addReg(CalleeOp);
|
||||
|
||||
|
||||
} else {
|
||||
// Direct call.
|
||||
assert(GV && "Not a direct call");
|
||||
|
@ -1640,10 +1640,10 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
CallOpc = X86::CALL64pcrel32;
|
||||
else
|
||||
CallOpc = X86::CALLpcrel32;
|
||||
|
||||
|
||||
// See if we need any target-specific flags on the GV operand.
|
||||
unsigned char OpFlags = 0;
|
||||
|
||||
|
||||
// On ELF targets, in both X86-64 and X86-32 mode, direct calls to
|
||||
// external symbols most go through the PLT in PIC mode. If the symbol
|
||||
// has hidden or protected visibility, or if it is static or local, then
|
||||
|
@ -1660,8 +1660,8 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
// automatically synthesizes these stubs.
|
||||
OpFlags = X86II::MO_DARWIN_STUB;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc))
|
||||
.addGlobalAddress(GV, 0, OpFlags);
|
||||
}
|
||||
|
@ -1690,7 +1690,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
assert(RVLocs.size() == 1 && "Can't handle multi-value calls!");
|
||||
EVT CopyVT = RVLocs[0].getValVT();
|
||||
TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
|
||||
|
||||
|
||||
// If this is a call to a function that returns an fp value on the x87 fp
|
||||
// stack, but where we prefer to use the value in xmm registers, copy it
|
||||
// out as F80 and use a truncate to move it from fp stack reg to xmm reg.
|
||||
|
@ -1728,7 +1728,7 @@ bool X86FastISel::X86SelectCall(const Instruction *I) {
|
|||
if (AndToI1) {
|
||||
// Mask out all but lowest bit for some call which produces an i1.
|
||||
unsigned AndResult = createResultReg(X86::GR8RegisterClass);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL,
|
||||
TII.get(X86::AND8ri), AndResult).addReg(ResultReg).addImm(1);
|
||||
ResultReg = AndResult;
|
||||
}
|
||||
|
@ -1798,7 +1798,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
|
|||
MVT VT;
|
||||
if (!isTypeLegal(C->getType(), VT))
|
||||
return false;
|
||||
|
||||
|
||||
// Get opcode and regclass of the output for the given load instruction.
|
||||
unsigned Opc = 0;
|
||||
const TargetRegisterClass *RC = NULL;
|
||||
|
@ -1843,7 +1843,7 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
|
|||
// No f80 support yet.
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
// Materialize addresses with LEA instructions.
|
||||
if (isa<GlobalValue>(C)) {
|
||||
X86AddressMode AM;
|
||||
|
@ -1859,14 +1859,14 @@ unsigned X86FastISel::TargetMaterializeConstant(const Constant *C) {
|
|||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
// MachineConstantPool wants an explicit alignment.
|
||||
unsigned Align = TD.getPrefTypeAlignment(C->getType());
|
||||
if (Align == 0) {
|
||||
// Alignment of vector types. FIXME!
|
||||
Align = TD.getTypeAllocSize(C->getType());
|
||||
}
|
||||
|
||||
|
||||
// x86-32 PIC requires a PIC base register for constant pools.
|
||||
unsigned PICBase = 0;
|
||||
unsigned char OpFlag = 0;
|
||||
|
@ -1922,19 +1922,19 @@ bool X86FastISel::TryToFoldLoad(MachineInstr *MI, unsigned OpNo,
|
|||
X86AddressMode AM;
|
||||
if (!X86SelectAddress(LI->getOperand(0), AM))
|
||||
return false;
|
||||
|
||||
|
||||
X86InstrInfo &XII = (X86InstrInfo&)TII;
|
||||
|
||||
|
||||
unsigned Size = TD.getTypeAllocSize(LI->getType());
|
||||
unsigned Alignment = LI->getAlignment();
|
||||
|
||||
SmallVector<MachineOperand, 8> AddrOps;
|
||||
AM.getFullAddress(AddrOps);
|
||||
|
||||
|
||||
MachineInstr *Result =
|
||||
XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, Size, Alignment);
|
||||
if (Result == 0) return false;
|
||||
|
||||
|
||||
MI->getParent()->insert(MI, Result);
|
||||
MI->eraseFromParent();
|
||||
return true;
|
||||
|
|
|
@ -226,12 +226,12 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
|
|||
|
||||
// TODO: when we have SSE, these could be more efficient, by using movd/movq.
|
||||
if (!X86ScalarSSEf64) {
|
||||
setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
|
||||
setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
|
||||
setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
|
||||
setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
|
||||
if (Subtarget->is64Bit()) {
|
||||
setOperationAction(ISD::BIT_CONVERT , MVT::f64 , Expand);
|
||||
setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
|
||||
// Without SSE, i64->f64 goes through memory.
|
||||
setOperationAction(ISD::BIT_CONVERT , MVT::i64 , Expand);
|
||||
setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -654,10 +654,10 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
|
|||
setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
|
||||
setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
|
||||
setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::v8i8, Expand);
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::v4i16, Expand);
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::v2i32, Expand);
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::v1i64, Expand);
|
||||
setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
|
||||
setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
|
||||
setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
|
||||
setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);
|
||||
|
||||
if (!UseSoftFloat && Subtarget->hasSSE1()) {
|
||||
addRegisterClass(MVT::v4f32, X86::VR128RegisterClass);
|
||||
|
@ -1293,13 +1293,13 @@ X86TargetLowering::LowerReturn(SDValue Chain,
|
|||
if (Subtarget->is64Bit()) {
|
||||
if (ValVT == MVT::x86mmx) {
|
||||
if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
|
||||
ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy);
|
||||
ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
|
||||
ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
|
||||
ValToCopy);
|
||||
// If we don't have SSE2 available, convert to v4f32 so the generated
|
||||
// register is legal.
|
||||
if (!Subtarget->hasSSE2())
|
||||
ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,ValToCopy);
|
||||
ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1406,7 +1406,7 @@ X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
|
|||
MVT::i64, InFlag).getValue(1);
|
||||
Val = Chain.getValue(0);
|
||||
}
|
||||
Val = DAG.getNode(ISD::BIT_CONVERT, dl, CopyVT, Val);
|
||||
Val = DAG.getNode(ISD::BITCAST, dl, CopyVT, Val);
|
||||
} else {
|
||||
Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
|
||||
CopyVT, InFlag).getValue(1);
|
||||
|
@ -1589,7 +1589,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
|
|||
ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
|
||||
DAG.getValueType(VA.getValVT()));
|
||||
else if (VA.getLocInfo() == CCValAssign::BCvt)
|
||||
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, VA.getValVT(), ArgValue);
|
||||
ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
|
||||
|
||||
if (VA.isExtInLoc()) {
|
||||
// Handle MMX values passed in XMM regs.
|
||||
|
@ -1922,14 +1922,14 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
|
|||
case CCValAssign::AExt:
|
||||
if (RegVT.isVector() && RegVT.getSizeInBits() == 128) {
|
||||
// Special case: passing MMX values in XMM registers.
|
||||
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
|
||||
Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
|
||||
Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
|
||||
Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
|
||||
} else
|
||||
Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
|
||||
break;
|
||||
case CCValAssign::BCvt:
|
||||
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, RegVT, Arg);
|
||||
Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
|
||||
break;
|
||||
case CCValAssign::Indirect: {
|
||||
// Store the argument.
|
||||
|
@@ -3501,7 +3501,7 @@ static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG,
SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8);
}
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
+return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}

/// getOnesVector - Returns a vector of specified type with all bits set.

@@ -3514,7 +3514,7 @@ static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
SDValue Vec;
Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
+return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}
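Both getZeroVector and getOnesVector above follow the same recipe: materialize the constant in one canonical vector type and then reinterpret it as whatever 128-bit type the caller asked for. Below is a minimal sketch of that recipe, assuming the same era's API; the helper name getOnesVectorAs is ours, not the backend's.

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Build an all-ones constant as v4i32, then reinterpret the bits as VT.
// ISD::BITCAST is a pure reinterpretation, so VT must also be 128 bits wide.
static SDValue getOnesVectorAs(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
  SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32);
  SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
                            Cst, Cst, Cst, Cst);
  return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
}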
@@ -3599,9 +3599,9 @@ static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {

// Perform the splat.
int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
-V1 = DAG.getNode(ISD::BIT_CONVERT, dl, PVT, V1);
+V1 = DAG.getNode(ISD::BITCAST, dl, PVT, V1);
V1 = DAG.getVectorShuffle(PVT, dl, V1, DAG.getUNDEF(PVT), &SplatMask[0]);
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT, V1);
+return DAG.getNode(ISD::BITCAST, dl, VT, V1);
}

/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified

@@ -3725,7 +3725,7 @@ SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG,
}

// Actual nodes that may contain scalar elements
-if (Opcode == ISD::BIT_CONVERT) {
+if (Opcode == ISD::BITCAST) {
V = V.getOperand(0);
EVT SrcVT = V.getValueType();
unsigned NumElems = VT.getVectorNumElements();

@@ -3914,7 +3914,7 @@ static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
}
}

-return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V);
+return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
}

/// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.

@@ -3955,8 +3955,8 @@ static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
const TargetLowering &TLI, DebugLoc dl) {
EVT ShVT = MVT::v2i64;
unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
-SrcOp = DAG.getNode(ISD::BIT_CONVERT, dl, ShVT, SrcOp);
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
+return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(Opc, dl, ShVT, SrcOp,
DAG.getConstant(NumBits, TLI.getShiftAmountTy())));
}
@@ -4023,8 +4023,8 @@ X86TargetLowering::LowerAsSplatVectorLoad(SDValue SrcOp, EVT VT, DebugLoc dl,
LD->getPointerInfo().getWithOffset(StartOffset),
false, false, 0);
// Canonicalize it to a v4i32 shuffle.
-V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32, V1);
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
+return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getVectorShuffle(MVT::v4i32, dl, V1,
DAG.getUNDEF(MVT::v4i32),&Mask[0]));
}

@@ -4092,7 +4092,7 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts,
SDValue ResNode = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys,
Ops, 2, MVT::i32,
LDBase->getMemOperand());
-return DAG.getNode(ISD::BIT_CONVERT, DL, VT, ResNode);
+return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
}
return SDValue();
}

@@ -4184,7 +4184,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
DAG.getUNDEF(Item.getValueType()),
&Mask[0]);
}
-return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(), Item);
+return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Item);
}
}

@@ -4208,7 +4208,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MiddleVT, Item);
Item = getShuffleVectorZeroOrUndef(Item, 0, true,
Subtarget->hasSSE2(), DAG);
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Item);
+return DAG.getNode(ISD::BITCAST, dl, VT, Item);
}
}
@@ -4401,21 +4401,21 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
assert(ResVT == MVT::v2i64 || ResVT == MVT::v4i32 ||
ResVT == MVT::v8i16 || ResVT == MVT::v16i8);
int Mask[2];
-SDValue InVec = DAG.getNode(ISD::BIT_CONVERT,dl, MVT::v1i64, Op.getOperand(0));
+SDValue InVec = DAG.getNode(ISD::BITCAST,dl, MVT::v1i64, Op.getOperand(0));
SDValue VecOp = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
InVec = Op.getOperand(1);
if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) {
unsigned NumElts = ResVT.getVectorNumElements();
-VecOp = DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp);
+VecOp = DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
VecOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ResVT, VecOp,
InVec.getOperand(0), DAG.getIntPtrConstant(NumElts/2+1));
} else {
-InVec = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v1i64, InVec);
+InVec = DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, InVec);
SDValue VecOp2 = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64, InVec);
Mask[0] = 0; Mask[1] = 2;
VecOp = DAG.getVectorShuffle(MVT::v2i64, dl, VecOp, VecOp2, Mask);
}
-return DAG.getNode(ISD::BIT_CONVERT, dl, ResVT, VecOp);
+return DAG.getNode(ISD::BITCAST, dl, ResVT, VecOp);
}

// v8i16 shuffles - Prefer shuffles in the following order:

@@ -4497,9 +4497,9 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
MaskV.push_back(BestLoQuad < 0 ? 0 : BestLoQuad);
MaskV.push_back(BestHiQuad < 0 ? 1 : BestHiQuad);
NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
-DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V1),
-DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, V2), &MaskV[0]);
-NewV = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, NewV);
+DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
+DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
+NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);

// Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
// source words for the shuffle, to aid later transformations.
@@ -4568,12 +4568,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
pshufbMask.push_back(DAG.getConstant(EltIdx+1, MVT::i8));
}
-V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V1);
+V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V1);
V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::v16i8, &pshufbMask[0], 16));
if (!TwoInputs)
-return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
+return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);

// Calculate the shuffle mask for the second input, shuffle it, and
// OR it with the first shuffled input.

@@ -4588,12 +4588,12 @@ X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op,
pshufbMask.push_back(DAG.getConstant(EltIdx - 16, MVT::i8));
pshufbMask.push_back(DAG.getConstant(EltIdx - 15, MVT::i8));
}
-V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, V2);
+V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V2);
V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
DAG.getNode(ISD::BUILD_VECTOR, dl,
MVT::v16i8, &pshufbMask[0], 16));
V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
-return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
+return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
}

// If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,

@@ -4760,8 +4760,8 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
// No SSSE3 - Calculate in place words and then fix all out of place words
// With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
// the 16 different words that comprise the two doublequadword input vectors.
-V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V1);
-V2 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v8i16, V2);
+V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
+V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
SDValue NewV = V2Only ? V2 : V1;
for (int i = 0; i != 8; ++i) {
int Elt0 = MaskVals[i*2];
@@ -4823,7 +4823,7 @@ SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
DAG.getIntPtrConstant(i));
}
-return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v16i8, NewV);
+return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}

/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide

@@ -4867,8 +4867,8 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
MaskVec.push_back(StartIdx / Scale);
}

-V1 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V1);
-V2 = DAG.getNode(ISD::BIT_CONVERT, dl, NewVT, V2);
+V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
+V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}

@@ -4887,11 +4887,11 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT,
MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
-SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
+SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
// PR2108
OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
OpVT,

@@ -4901,9 +4901,9 @@ static SDValue getVZextMovL(EVT VT, EVT OpVT,
}
}

-return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
-DAG.getNode(ISD::BIT_CONVERT, dl,
+DAG.getNode(ISD::BITCAST, dl,
OpVT, SrcOp)));
}
@@ -5057,7 +5057,7 @@ LowerVECTOR_SHUFFLE_4wide(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
}

static bool MayFoldVectorLoad(SDValue V) {
-if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT)
+if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
V = V.getOperand(0);
if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
V = V.getOperand(0);

@@ -5074,7 +5074,7 @@ static bool MayFoldVectorLoad(SDValue V) {
// one use. Remove this version after this bug get fixed.
// rdar://8434668, PR8156
static bool RelaxedMayFoldVectorLoad(SDValue V) {
-if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT)
+if (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
V = V.getOperand(0);
if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
V = V.getOperand(0);

@@ -5112,7 +5112,7 @@ bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
// If the bit convert changed the number of elements, it is unsafe
// to examine the mask.
bool HasShuffleIntoBitcast = false;
-if (V.getOpcode() == ISD::BIT_CONVERT) {
+if (V.getOpcode() == ISD::BITCAST) {
EVT SrcVT = V.getOperand(0).getValueType();
if (SrcVT.getVectorNumElements() != VT.getVectorNumElements())
return false;

@@ -5127,7 +5127,7 @@ bool CanXFormVExtractWithShuffleIntoLoad(SDValue V, SelectionDAG &DAG,
V = (Idx < (int)NumElems) ? V.getOperand(0) : V.getOperand(1);

// Skip one more bit_convert if necessary
-if (V.getOpcode() == ISD::BIT_CONVERT)
+if (V.getOpcode() == ISD::BITCAST)
V = V.getOperand(0);

if (ISD::isNormalLoad(V.getNode())) {
@@ -5164,8 +5164,8 @@ SDValue getMOVDDup(SDValue &Op, DebugLoc &dl, SDValue V1, SelectionDAG &DAG) {
EVT VT = Op.getValueType();

// Canonizalize to v2f64.
-V1 = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, V1);
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
+return DAG.getNode(ISD::BITCAST, dl, VT,
getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
V1, DAG));
}

@@ -5319,7 +5319,7 @@ SDValue NormalizeVectorShuffle(SDValue Op, SelectionDAG &DAG,
if (VT == MVT::v8i16 || VT == MVT::v16i8) {
SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG, dl);
if (NewOp.getNode())
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT, NewOp);
+return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
} else if ((VT == MVT::v4i32 || (VT == MVT::v4f32 && Subtarget->hasSSE2()))) {
// FIXME: Figure out a cleaner way to do this.
// Try to make use of movq to zero out the top part.

@@ -5629,7 +5629,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
if (Idx == 0)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-DAG.getNode(ISD::BIT_CONVERT, dl,
+DAG.getNode(ISD::BITCAST, dl,
MVT::v4i32,
Op.getOperand(0)),
Op.getOperand(1)));

@@ -5650,14 +5650,14 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op,
if ((User->getOpcode() != ISD::STORE ||
(isa<ConstantSDNode>(Op.getOperand(1)) &&
cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
-(User->getOpcode() != ISD::BIT_CONVERT ||
+(User->getOpcode() != ISD::BITCAST ||
User->getValueType(0) != MVT::i32))
return SDValue();
SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4i32,
+DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
Op.getOperand(0)),
Op.getOperand(1));
-return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Extract);
+return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
} else if (VT == MVT::i32) {
// ExtractPS works with constant index.
if (isa<ConstantSDNode>(Op.getOperand(1)))
@@ -5688,7 +5688,7 @@ X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
if (Idx == 0)
return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
-DAG.getNode(ISD::BIT_CONVERT, dl,
+DAG.getNode(ISD::BITCAST, dl,
MVT::v4i32, Vec),
Op.getOperand(1)));
// Transform it so it match pextrw which produces a 32-bit result.

@@ -5819,7 +5819,7 @@ X86TargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const {
SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
assert(Op.getValueType().getSimpleVT().getSizeInBits() == 128 &&
"Expected an SSE type!");
-return DAG.getNode(ISD::BIT_CONVERT, dl, Op.getValueType(),
+return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(),
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
}

@@ -6390,7 +6390,7 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
MachinePointerInfo::getConstantPool(),
false, false, 16);
SDValue Unpck2 = getUnpackl(DAG, dl, MVT::v4i32, Unpck1, CLod0);
-SDValue XR2F = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Unpck2);
+SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck2);
SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
MachinePointerInfo::getConstantPool(),
false, false, 16);
@@ -6420,19 +6420,19 @@ SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
DAG.getIntPtrConstant(0)));

Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
-DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Load),
+DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
DAG.getIntPtrConstant(0));

// Or the load with the bias.
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
-DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
+DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
MVT::v2f64, Load)),
-DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
+DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
MVT::v2f64, Bias)));
Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
-DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2f64, Or),
+DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
DAG.getIntPtrConstant(0));

// Subtract the bias.

@@ -6690,11 +6690,11 @@ SDValue X86TargetLowering::LowerFNEG(SDValue Op, SelectionDAG &DAG) const {
MachinePointerInfo::getConstantPool(),
false, false, 16);
if (VT.isVector()) {
-return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
+return DAG.getNode(ISD::BITCAST, dl, VT,
DAG.getNode(ISD::XOR, dl, MVT::v2i64,
-DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64,
+DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
Op.getOperand(0)),
-DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v2i64, Mask)));
+DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Mask)));
} else {
return DAG.getNode(X86ISD::FXOR, dl, VT, Op.getOperand(0), Mask);
}
@@ -6746,7 +6746,7 @@ SDValue X86TargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, SignBit);
SignBit = DAG.getNode(X86ISD::FSRL, dl, MVT::v2f64, SignBit,
DAG.getConstant(32, MVT::i32));
-SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, SignBit);
+SignBit = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, SignBit);
SignBit = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, SignBit,
DAG.getIntPtrConstant(0));
}

@@ -7895,7 +7895,7 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const
}

EVT VT = Op.getValueType();
-ShAmt = DAG.getNode(ISD::BIT_CONVERT, dl, VT, ShAmt);
+ShAmt = DAG.getNode(ISD::BITCAST, dl, VT, ShAmt);
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
DAG.getConstant(NewIntNo, MVT::i32),
Op.getOperand(1), ShAmt);

@@ -8329,7 +8329,7 @@ SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const {
false, false, 16);

Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend);
-Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, Op);
+Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
return DAG.getNode(ISD::MUL, dl, VT, Op, R);
}
@@ -8550,16 +8550,16 @@ SDValue X86TargetLowering::LowerREADCYCLECOUNTER(SDValue Op,
return DAG.getMergeValues(Ops, 2, dl);
}

-SDValue X86TargetLowering::LowerBIT_CONVERT(SDValue Op,
+SDValue X86TargetLowering::LowerBITCAST(SDValue Op,
SelectionDAG &DAG) const {
EVT SrcVT = Op.getOperand(0).getValueType();
EVT DstVT = Op.getValueType();
assert((Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
Subtarget->hasMMX() && !DisableMMX) &&
-"Unexpected custom BIT_CONVERT");
+"Unexpected custom BITCAST");
assert((DstVT == MVT::i64 ||
(DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
-"Unexpected custom BIT_CONVERT");
+"Unexpected custom BITCAST");
// i64 <=> MMX conversions are Legal.
if (SrcVT==MVT::i64 && DstVT.isVector())
return Op;

@@ -8642,7 +8642,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::SMULO:
case ISD::UMULO: return LowerXALUO(Op, DAG);
case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, DAG);
-case ISD::BIT_CONVERT: return LowerBIT_CONVERT(Op, DAG);
+case ISD::BITCAST: return LowerBITCAST(Op, DAG);
}
}
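The LowerOperation hunk above is the dispatch point for the custom hook; every other change in this file simply builds the node directly through getNode. As a generic illustration of that direct use (not code from the patch; the helper name is made up), reinterpreting an f32 value as an i32 of the same width looks like this with the renamed opcode:

#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

// Reinterpret an f32 SDValue as i32 without changing any bits. The source and
// destination types of an ISD::BITCAST must have the same bit width.
static SDValue reinterpretF32AsI32(SelectionDAG &DAG, DebugLoc dl, SDValue V) {
  assert(V.getValueType() == MVT::f32 && "expected an f32 value");
  return DAG.getNode(ISD::BITCAST, dl, MVT::i32, V);
}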
@@ -11177,13 +11177,13 @@ static SDValue PerformBTCombine(SDNode *N,

static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
SDValue Op = N->getOperand(0);
-if (Op.getOpcode() == ISD::BIT_CONVERT)
+if (Op.getOpcode() == ISD::BITCAST)
Op = Op.getOperand(0);
EVT VT = N->getValueType(0), OpVT = Op.getValueType();
if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
VT.getVectorElementType().getSizeInBits() ==
OpVT.getVectorElementType().getSizeInBits()) {
-return DAG.getNode(ISD::BIT_CONVERT, N->getDebugLoc(), VT, Op);
+return DAG.getNode(ISD::BITCAST, N->getDebugLoc(), VT, Op);
}
return SDValue();
}
@@ -740,7 +740,7 @@ namespace llvm {
SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;
SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
SelectionDAG &DAG) const;
-SDValue LowerBIT_CONVERT(SDValue op, SelectionDAG &DAG) const;
+SDValue LowerBITCAST(SDValue op, SelectionDAG &DAG) const;
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;