Split EVT into MVT and EVT, the former representing _just_ a primitive type, while
the latter is capable of representing either a primitive or an extended type.

llvm-svn: 78713
Owen Anderson 2009-08-11 20:47:22 +00:00
parent a1e04d43c4
commit 9f94459d24
86 changed files with 5608 additions and 5506 deletions
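To make the split concrete, here is a minimal illustrative sketch (an editor's example, not part of the commit; the function name and the 17-bit width are arbitrary) of how the two types relate after this change, using only the interfaces defined in the MVT/EVT diff further down. An MVT always names a primitive machine type; an EVT is either "simple" (carrying an MVT) or "extended" (carrying a pointer to an LLVM IR type).

#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>
using namespace llvm;

void Example() {
  MVT Simple = MVT::i32;                 // an MVT is always a primitive machine type
  EVT AlsoSimple = EVT(MVT::i32);        // a "simple" EVT merely wraps an MVT
  EVT Extended = EVT::getIntegerVT(17);  // no MVT::i17 exists, so this EVT is extended

  assert(AlsoSimple.isSimple() && AlsoSimple.getSimpleVT() == Simple);
  assert(!Extended.isSimple());          // getSimpleVT() would assert on this value
}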


@ -35,7 +35,7 @@ static bool IsChainCompatible(SDNode *Chain, SDNode *Op) {
return false;
if (Chain->getNumOperands() > 0) {
SDValue C0 = Chain->getOperand(0);
if (C0.getValueType() == EVT::Other)
if (C0.getValueType() == MVT::Other)
return C0.getNode() != Op && IsChainCompatible(C0.getNode(), Op);
}
return true;


@ -137,24 +137,24 @@ protected:
/// FastEmit_ - This method is called by target-independent code
/// to request that an instruction with the given type and opcode
/// be emitted.
virtual unsigned FastEmit_(EVT::SimpleValueType VT,
EVT::SimpleValueType RetVT,
virtual unsigned FastEmit_(MVT VT,
MVT RetVT,
ISD::NodeType Opcode);
/// FastEmit_r - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
/// register operand be emitted.
///
virtual unsigned FastEmit_r(EVT::SimpleValueType VT,
EVT::SimpleValueType RetVT,
virtual unsigned FastEmit_r(MVT VT,
MVT RetVT,
ISD::NodeType Opcode, unsigned Op0);
/// FastEmit_rr - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
/// register operands be emitted.
///
virtual unsigned FastEmit_rr(EVT::SimpleValueType VT,
EVT::SimpleValueType RetVT,
virtual unsigned FastEmit_rr(MVT VT,
MVT RetVT,
ISD::NodeType Opcode,
unsigned Op0, unsigned Op1);
@ -162,8 +162,8 @@ protected:
/// to request that an instruction with the given type, opcode, and
/// register and immediate operands be emitted.
///
virtual unsigned FastEmit_ri(EVT::SimpleValueType VT,
EVT::SimpleValueType RetVT,
virtual unsigned FastEmit_ri(MVT VT,
MVT RetVT,
ISD::NodeType Opcode,
unsigned Op0, uint64_t Imm);
@ -171,8 +171,8 @@ protected:
/// to request that an instruction with the given type, opcode, and
/// register and floating-point immediate operands be emitted.
///
virtual unsigned FastEmit_rf(EVT::SimpleValueType VT,
EVT::SimpleValueType RetVT,
virtual unsigned FastEmit_rf(MVT VT,
MVT RetVT,
ISD::NodeType Opcode,
unsigned Op0, ConstantFP *FPImm);
@ -180,8 +180,8 @@ protected:
/// to request that an instruction with the given type, opcode, and
/// register and immediate operands be emitted.
///
virtual unsigned FastEmit_rri(EVT::SimpleValueType VT,
EVT::SimpleValueType RetVT,
virtual unsigned FastEmit_rri(MVT VT,
MVT RetVT,
ISD::NodeType Opcode,
unsigned Op0, unsigned Op1, uint64_t Imm);
@ -189,33 +189,33 @@ protected:
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastEmit_ri_(EVT::SimpleValueType VT,
unsigned FastEmit_ri_(MVT VT,
ISD::NodeType Opcode,
unsigned Op0, uint64_t Imm,
EVT::SimpleValueType ImmType);
MVT ImmType);
/// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
/// to emit an instruction with an immediate operand using FastEmit_rf.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastEmit_rf_(EVT::SimpleValueType VT,
unsigned FastEmit_rf_(MVT VT,
ISD::NodeType Opcode,
unsigned Op0, ConstantFP *FPImm,
EVT::SimpleValueType ImmType);
MVT ImmType);
/// FastEmit_i - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
/// immediate operand be emitted.
virtual unsigned FastEmit_i(EVT::SimpleValueType VT,
EVT::SimpleValueType RetVT,
virtual unsigned FastEmit_i(MVT VT,
MVT RetVT,
ISD::NodeType Opcode,
uint64_t Imm);
/// FastEmit_f - This method is called by target-independent code
/// to request that an instruction with the given type, opcode, and
/// floating-point immediate operand be emitted.
virtual unsigned FastEmit_f(EVT::SimpleValueType VT,
EVT::SimpleValueType RetVT,
virtual unsigned FastEmit_f(MVT VT,
MVT RetVT,
ISD::NodeType Opcode,
ConstantFP *FPImm);
@ -268,12 +268,12 @@ protected:
/// FastEmitInst_extractsubreg - Emit a MachineInstr for an extract_subreg
/// from a specified index of a superregister to a specified type.
unsigned FastEmitInst_extractsubreg(EVT::SimpleValueType RetVT,
unsigned FastEmitInst_extractsubreg(MVT RetVT,
unsigned Op0, uint32_t Idx);
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastEmitZExtFromI1(EVT::SimpleValueType VT,
unsigned FastEmitZExtFromI1(MVT VT,
unsigned Op);
/// FastEmitBranch - Emit an unconditional branch to the given block,


@ -193,7 +193,7 @@ public:
/// setRoot - Set the current root tag of the SelectionDAG.
///
const SDValue &setRoot(SDValue N) {
assert((!N.getNode() || N.getValueType() == EVT::Other) &&
assert((!N.getNode() || N.getValueType() == MVT::Other) &&
"DAG root value is not a chain!");
return Root = N;
}
@ -327,7 +327,7 @@ public:
unsigned LabelID);
SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N) {
return getNode(ISD::CopyToReg, dl, EVT::Other, Chain,
return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
getRegister(Reg, N.getValueType()), N);
}
@ -336,7 +336,7 @@ public:
// null) and that there should be a flag result.
SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N,
SDValue Flag) {
SDVTList VTs = getVTList(EVT::Other, EVT::Flag);
SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Flag };
return getNode(ISD::CopyToReg, dl, VTs, Ops, Flag.getNode() ? 4 : 3);
}
@ -344,13 +344,13 @@ public:
// Similar to last getCopyToReg() except parameter Reg is a SDValue
SDValue getCopyToReg(SDValue Chain, DebugLoc dl, SDValue Reg, SDValue N,
SDValue Flag) {
SDVTList VTs = getVTList(EVT::Other, EVT::Flag);
SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
SDValue Ops[] = { Chain, Reg, N, Flag };
return getNode(ISD::CopyToReg, dl, VTs, Ops, Flag.getNode() ? 4 : 3);
}
SDValue getCopyFromReg(SDValue Chain, DebugLoc dl, unsigned Reg, EVT VT) {
SDVTList VTs = getVTList(VT, EVT::Other);
SDVTList VTs = getVTList(VT, MVT::Other);
SDValue Ops[] = { Chain, getRegister(Reg, VT) };
return getNode(ISD::CopyFromReg, dl, VTs, Ops, 2);
}
@ -360,7 +360,7 @@ public:
// null) and that there should be a flag result.
SDValue getCopyFromReg(SDValue Chain, DebugLoc dl, unsigned Reg, EVT VT,
SDValue Flag) {
SDVTList VTs = getVTList(VT, EVT::Other, EVT::Flag);
SDVTList VTs = getVTList(VT, MVT::Other, MVT::Flag);
SDValue Ops[] = { Chain, getRegister(Reg, VT), Flag };
return getNode(ISD::CopyFromReg, dl, VTs, Ops, Flag.getNode() ? 3 : 2);
}
@ -391,7 +391,7 @@ public:
/// a flag result (to ensure it's not CSE'd). CALLSEQ_START does not have a
/// useful DebugLoc.
SDValue getCALLSEQ_START(SDValue Chain, SDValue Op) {
SDVTList VTs = getVTList(EVT::Other, EVT::Flag);
SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
SDValue Ops[] = { Chain, Op };
return getNode(ISD::CALLSEQ_START, DebugLoc::getUnknownLoc(),
VTs, Ops, 2);
@ -402,7 +402,7 @@ public:
/// a useful DebugLoc.
SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
SDValue InFlag) {
SDVTList NodeTys = getVTList(EVT::Other, EVT::Flag);
SDVTList NodeTys = getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 4> Ops;
Ops.push_back(Chain);
Ops.push_back(Op1);


@ -1245,7 +1245,7 @@ public:
/// to which the flag operand points. Otherwise return NULL.
SDNode *getFlaggedNode() const {
if (getNumOperands() != 0 &&
getOperand(getNumOperands()-1).getValueType() == EVT::Flag)
getOperand(getNumOperands()-1).getValueType().getSimpleVT() == MVT::Flag)
return getOperand(getNumOperands()-1).getNode();
return 0;
}
@ -1278,7 +1278,7 @@ public:
return ValueList[ResNo];
}
/// getValueSizeInBits - Returns EVT::getSizeInBits(getValueType(ResNo)).
/// getValueSizeInBits - Returns MVT::getSizeInBits(getValueType(ResNo)).
///
unsigned getValueSizeInBits(unsigned ResNo) const {
return getValueType(ResNo).getSizeInBits();
@ -1505,7 +1505,7 @@ public:
explicit HandleSDNode(SDValue X)
#endif
: SDNode(ISD::HANDLENODE, DebugLoc::getUnknownLoc(),
getSDVTList(EVT::Other)) {
getSDVTList(MVT::Other)) {
InitOperands(&Op, X);
}
~HandleSDNode();
@ -1914,7 +1914,7 @@ class BasicBlockSDNode : public SDNode {
/// harder. Let's see if we need it first.
explicit BasicBlockSDNode(MachineBasicBlock *mbb)
: SDNode(ISD::BasicBlock, DebugLoc::getUnknownLoc(),
getSDVTList(EVT::Other)), MBB(mbb) {
getSDVTList(MVT::Other)), MBB(mbb) {
}
public:
@ -1965,7 +1965,7 @@ class SrcValueSDNode : public SDNode {
/// Create a SrcValue for a general value.
explicit SrcValueSDNode(const Value *v)
: SDNode(ISD::SRCVALUE, DebugLoc::getUnknownLoc(),
getSDVTList(EVT::Other)), V(v) {}
getSDVTList(MVT::Other)), V(v) {}
public:
/// getValue - return the contained Value.
@ -1987,7 +1987,7 @@ class MemOperandSDNode : public SDNode {
/// Create a MachineMemOperand node
explicit MemOperandSDNode(const MachineMemOperand &mo)
: SDNode(ISD::MEMOPERAND, DebugLoc::getUnknownLoc(),
getSDVTList(EVT::Other)), MO(mo) {}
getSDVTList(MVT::Other)), MO(mo) {}
public:
/// MO - The contained MachineMemOperand.
@ -2026,7 +2026,7 @@ class DbgStopPointSDNode : public SDNode {
DbgStopPointSDNode(SDValue ch, unsigned l, unsigned c,
Value *cu)
: SDNode(ISD::DBG_STOPPOINT, DebugLoc::getUnknownLoc(),
getSDVTList(EVT::Other)), Line(l), Column(c), CU(cu) {
getSDVTList(MVT::Other)), Line(l), Column(c), CU(cu) {
InitOperands(&Chain, ch);
}
public:
@ -2045,7 +2045,7 @@ class LabelSDNode : public SDNode {
unsigned LabelID;
friend class SelectionDAG;
LabelSDNode(unsigned NodeTy, DebugLoc dl, SDValue ch, unsigned id)
: SDNode(NodeTy, dl, getSDVTList(EVT::Other)), LabelID(id) {
: SDNode(NodeTy, dl, getSDVTList(MVT::Other)), LabelID(id) {
InitOperands(&Chain, ch);
}
public:
@ -2085,7 +2085,7 @@ class CondCodeSDNode : public SDNode {
friend class SelectionDAG;
explicit CondCodeSDNode(ISD::CondCode Cond)
: SDNode(ISD::CONDCODE, DebugLoc::getUnknownLoc(),
getSDVTList(EVT::Other)), Condition(Cond) {
getSDVTList(MVT::Other)), Condition(Cond) {
}
public:
@ -2210,7 +2210,7 @@ namespace ISD {
EVT VT;
bool Used;
InputArg() : VT(EVT::Other), Used(false) {}
InputArg() : VT(MVT::Other), Used(false) {}
InputArg(ISD::ArgFlagsTy flags, EVT vt, bool used)
: Flags(flags), VT(vt), Used(used) {
assert(VT.isSimple() &&
@ -2243,7 +2243,7 @@ class VTSDNode : public SDNode {
friend class SelectionDAG;
explicit VTSDNode(EVT VT)
: SDNode(ISD::VALUETYPE, DebugLoc::getUnknownLoc(),
getSDVTList(EVT::Other)), ValueType(VT) {
getSDVTList(MVT::Other)), ValueType(VT) {
}
public:


@ -24,9 +24,9 @@
namespace llvm {
class Type;
class LLVMContext;
struct EVT;
struct EVT { // EVT = Machine Value Type
public:
struct MVT { // MVT = Machine Value Type
enum SimpleValueType {
// If you change this numbering, you must change the values in
// ValueTypes.td as well!
@ -113,131 +113,288 @@ namespace llvm {
LastSimpleValueType = 255
};
SimpleValueType SimpleTy;
MVT() : SimpleTy((SimpleValueType)(LastSimpleValueType+1)) {}
MVT(SimpleValueType SVT) : SimpleTy(SVT) { }
bool operator>(const MVT& S) const { return SimpleTy > S.SimpleTy; }
bool operator<(const MVT& S) const { return SimpleTy < S.SimpleTy; }
bool operator==(const MVT& S) const { return SimpleTy == S.SimpleTy; }
bool operator>=(const MVT& S) const { return SimpleTy >= S.SimpleTy; }
bool operator<=(const MVT& S) const { return SimpleTy <= S.SimpleTy; }
/// isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool isFloatingPoint() const {
return ((SimpleTy >= MVT::f32 && SimpleTy <= MVT::ppcf128) ||
(SimpleTy >= MVT::v2f32 && SimpleTy <= MVT::v4f64));
}
/// isInteger - Return true if this is an integer, or a vector integer type.
bool isInteger() const {
return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
(SimpleTy >= MVT::v2i8 && SimpleTy <= MVT::v4i64));
}
/// isVector - Return true if this is a vector value type.
bool isVector() const {
return (SimpleTy >= MVT::FIRST_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
}
MVT getVectorElementType() const {
switch (SimpleTy) {
default:
return (MVT::SimpleValueType)(MVT::LastSimpleValueType+1);
case v2i8 :
case v4i8 :
case v8i8 :
case v16i8:
case v32i8: return i8;
case v2i16:
case v4i16:
case v8i16:
case v16i16: return i16;
case v2i32:
case v4i32:
case v8i32: return i32;
case v1i64:
case v2i64:
case v4i64: return i64;
case v2f32:
case v4f32:
case v8f32: return f32;
case v2f64:
case v4f64: return f64;
}
}
unsigned getVectorNumElements() const {
switch (SimpleTy) {
default:
return ~0U;
case v32i8: return 32;
case v16i8:
case v16i16: return 16;
case v8i8 :
case v8i16:
case v8i32:
case v8f32: return 8;
case v4i8:
case v4i16:
case v4i32:
case v4i64:
case v4f32:
case v4f64: return 4;
case v2i8:
case v2i16:
case v2i32:
case v2i64:
case v2f32:
case v2f64: return 2;
case v1i64: return 1;
}
}
unsigned getSizeInBits() const {
switch (SimpleTy) {
case iPTR:
assert(0 && "Value type size is target-dependent. Ask TLI.");
case iPTRAny:
case iAny:
case fAny:
assert(0 && "Value type is overloaded.");
default:
assert(0 && "getSizeInBits called on extended MVT.");
case i1 : return 1;
case i8 : return 8;
case i16 :
case v2i8: return 16;
case f32 :
case i32 :
case v4i8:
case v2i16: return 32;
case f64 :
case i64 :
case v8i8:
case v4i16:
case v2i32:
case v1i64:
case v2f32: return 64;
case f80 : return 80;
case f128:
case ppcf128:
case i128:
case v16i8:
case v8i16:
case v4i32:
case v2i64:
case v4f32:
case v2f64: return 128;
case v32i8:
case v16i16:
case v8i32:
case v4i64:
case v8f32:
case v4f64: return 256;
}
}
static MVT getFloatingPointVT(unsigned BitWidth) {
switch (BitWidth) {
default:
assert(false && "Bad bit width!");
case 32:
return MVT::f32;
case 64:
return MVT::f64;
case 80:
return MVT::f80;
case 128:
return MVT::f128;
}
}
static MVT getIntegerVT(unsigned BitWidth) {
switch (BitWidth) {
default:
return (MVT::SimpleValueType)(MVT::LastSimpleValueType+1);
case 1:
return MVT::i1;
case 8:
return MVT::i8;
case 16:
return MVT::i16;
case 32:
return MVT::i32;
case 64:
return MVT::i64;
case 128:
return MVT::i128;
}
}
static MVT getVectorVT(MVT VT, unsigned NumElements) {
switch (VT.SimpleTy) {
default:
break;
case MVT::i8:
if (NumElements == 2) return MVT::v2i8;
if (NumElements == 4) return MVT::v4i8;
if (NumElements == 8) return MVT::v8i8;
if (NumElements == 16) return MVT::v16i8;
if (NumElements == 32) return MVT::v32i8;
break;
case MVT::i16:
if (NumElements == 2) return MVT::v2i16;
if (NumElements == 4) return MVT::v4i16;
if (NumElements == 8) return MVT::v8i16;
if (NumElements == 16) return MVT::v16i16;
break;
case MVT::i32:
if (NumElements == 2) return MVT::v2i32;
if (NumElements == 4) return MVT::v4i32;
if (NumElements == 8) return MVT::v8i32;
break;
case MVT::i64:
if (NumElements == 1) return MVT::v1i64;
if (NumElements == 2) return MVT::v2i64;
if (NumElements == 4) return MVT::v4i64;
break;
case MVT::f32:
if (NumElements == 2) return MVT::v2f32;
if (NumElements == 4) return MVT::v4f32;
if (NumElements == 8) return MVT::v8f32;
break;
case MVT::f64:
if (NumElements == 2) return MVT::v2f64;
if (NumElements == 4) return MVT::v4f64;
break;
}
return (MVT::SimpleValueType)(MVT::LastSimpleValueType+1);
}
static MVT getIntVectorWithNumElements(unsigned NumElts) {
switch (NumElts) {
default: return (MVT::SimpleValueType)(MVT::LastSimpleValueType+1);
case 1: return MVT::v1i64;
case 2: return MVT::v2i32;
case 4: return MVT::v4i16;
case 8: return MVT::v8i8;
case 16: return MVT::v16i8;
}
}
};
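// Editor's illustrative aside (not part of the commit): rather than asserting,
// MVT::getIntegerVT and MVT::getVectorVT above report "no such simple type" by
// returning the out-of-range sentinel LastSimpleValueType+1, the same value the
// default MVT constructor uses; the EVT wrappers below test for that sentinel
// to decide when to fall back to an extended type. A minimal sketch of the same
// check (hypothetical helper name):
static inline bool hasSimpleIntegerVT(unsigned BitWidth) {
  return MVT::getIntegerVT(BitWidth).SimpleTy <= MVT::LastSimpleValueType;
}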
struct EVT { // EVT = Extended Value Type
private:
/// This union holds low-level value types. Valid values include any of
/// the values in the SimpleValueType enum, or any value returned from one
/// of the EVT methods. Any value type equal to one of the SimpleValueType
/// enum values is a "simple" value type. All others are "extended".
///
/// Note that simple doesn't necessarily mean legal for the target machine.
/// All legal value types must be simple, but often there are some simple
/// value types that are not legal.
///
union {
uintptr_t V;
const Type *LLVMTy;
};
MVT V;
const Type *LLVMTy;
public:
EVT() {}
EVT(SimpleValueType S) : V(S) {}
EVT() : V((MVT::SimpleValueType)(MVT::LastSimpleValueType+1)) {}
EVT(MVT::SimpleValueType SVT) : V(SVT) { }
EVT(MVT S) : V(S) {}
bool operator==(const EVT VT) const {
return getRawBits() == VT.getRawBits();
if (V.SimpleTy == VT.V.SimpleTy) {
if (V.SimpleTy == MVT::LastSimpleValueType+1)
return LLVMTy == VT.LLVMTy;
return true;
}
return false;
}
bool operator!=(const EVT VT) const {
return getRawBits() != VT.getRawBits();
if (V.SimpleTy == VT.V.SimpleTy) {
if (V.SimpleTy == MVT::LastSimpleValueType+1)
return LLVMTy != VT.LLVMTy;
return false;
}
return true;
}
/// getFloatingPointVT - Returns the EVT that represents a floating point
/// type with the given number of bits. There are two floating point types
/// with 128 bits - this returns f128 rather than ppcf128.
static EVT getFloatingPointVT(unsigned BitWidth) {
switch (BitWidth) {
default:
assert(false && "Bad bit width!");
case 32:
return f32;
case 64:
return f64;
case 80:
return f80;
case 128:
return f128;
}
return MVT::getFloatingPointVT(BitWidth);
}
/// getIntegerVT - Returns the EVT that represents an integer with the given
/// number of bits.
static EVT getIntegerVT(unsigned BitWidth) {
switch (BitWidth) {
default:
break;
case 1:
return i1;
case 8:
return i8;
case 16:
return i16;
case 32:
return i32;
case 64:
return i64;
case 128:
return i128;
}
return getExtendedIntegerVT(BitWidth);
MVT M = MVT::getIntegerVT(BitWidth);
if (M.SimpleTy == MVT::LastSimpleValueType+1)
return getExtendedIntegerVT(BitWidth);
else
return M;
}
/// getVectorVT - Returns the EVT that represents a vector NumElements in
/// length, where each element is of type VT.
static EVT getVectorVT(EVT VT, unsigned NumElements) {
switch (VT.V) {
default:
break;
case i8:
if (NumElements == 2) return v2i8;
if (NumElements == 4) return v4i8;
if (NumElements == 8) return v8i8;
if (NumElements == 16) return v16i8;
if (NumElements == 32) return v32i8;
break;
case i16:
if (NumElements == 2) return v2i16;
if (NumElements == 4) return v4i16;
if (NumElements == 8) return v8i16;
if (NumElements == 16) return v16i16;
break;
case i32:
if (NumElements == 2) return v2i32;
if (NumElements == 4) return v4i32;
if (NumElements == 8) return v8i32;
break;
case i64:
if (NumElements == 1) return v1i64;
if (NumElements == 2) return v2i64;
if (NumElements == 4) return v4i64;
break;
case f32:
if (NumElements == 2) return v2f32;
if (NumElements == 4) return v4f32;
if (NumElements == 8) return v8f32;
break;
case f64:
if (NumElements == 2) return v2f64;
if (NumElements == 4) return v4f64;
break;
}
return getExtendedVectorVT(VT, NumElements);
MVT M = MVT::getVectorVT(VT.V, NumElements);
if (M.SimpleTy == MVT::LastSimpleValueType+1)
return getExtendedVectorVT(VT, NumElements);
else
return M;
}
/// getIntVectorWithNumElements - Return any integer vector type that has
/// the specified number of elements.
static EVT getIntVectorWithNumElements(unsigned NumElts) {
switch (NumElts) {
default: return getVectorVT(i8, NumElts);
case 1: return v1i64;
case 2: return v2i32;
case 4: return v4i16;
case 8: return v8i8;
case 16: return v16i8;
}
MVT M = MVT::getIntVectorWithNumElements(NumElts);
if (M.SimpleTy == MVT::LastSimpleValueType+1)
return getVectorVT(EVT(MVT::i8), NumElts);
else
return M;
}
/// isSimple - Test if the given EVT is simple (as opposed to being
/// extended).
bool isSimple() const {
return V <= LastSimpleValueType;
return V.SimpleTy <= MVT::LastSimpleValueType;
}
/// isExtended - Test if the given EVT is extended (as opposed to
@ -249,49 +406,53 @@ namespace llvm {
/// isFloatingPoint - Return true if this is a FP, or a vector FP type.
bool isFloatingPoint() const {
return isSimple() ?
((V >= f32 && V <= ppcf128) ||
(V >= v2f32 && V <= v4f64)) : isExtendedFloatingPoint();
((V >= MVT::f32 && V <= MVT::ppcf128) ||
(V >= MVT::v2f32 && V <= MVT::v4f64)) : isExtendedFloatingPoint();
}
/// isInteger - Return true if this is an integer, or a vector integer type.
bool isInteger() const {
return isSimple() ?
((V >= FIRST_INTEGER_VALUETYPE && V <= LAST_INTEGER_VALUETYPE) ||
(V >= v2i8 && V <= v4i64)) : isExtendedInteger();
((V >= MVT::FIRST_INTEGER_VALUETYPE &&
V <= MVT::LAST_INTEGER_VALUETYPE) ||
(V >= MVT::v2i8 && V <= MVT::v4i64)) : isExtendedInteger();
}
/// isVector - Return true if this is a vector value type.
bool isVector() const {
return isSimple() ?
(V >= FIRST_VECTOR_VALUETYPE && V <= LAST_VECTOR_VALUETYPE) :
(V >= MVT::FIRST_VECTOR_VALUETYPE && V <=
MVT::LAST_VECTOR_VALUETYPE) :
isExtendedVector();
}
/// is64BitVector - Return true if this is a 64-bit vector type.
bool is64BitVector() const {
return isSimple() ?
(V==v8i8 || V==v4i16 || V==v2i32 || V==v1i64 || V==v2f32) :
(V==MVT::v8i8 || V==MVT::v4i16 || V==MVT::v2i32 ||
V==MVT::v1i64 || V==MVT::v2f32) :
isExtended64BitVector();
}
/// is128BitVector - Return true if this is a 128-bit vector type.
bool is128BitVector() const {
return isSimple() ?
(V==v16i8 || V==v8i16 || V==v4i32 ||
V==v2i64 || V==v4f32 || V==v2f64) :
(V==MVT::v16i8 || V==MVT::v8i16 || V==MVT::v4i32 ||
V==MVT::v2i64 || V==MVT::v4f32 || V==MVT::v2f64) :
isExtended128BitVector();
}
/// is256BitVector - Return true if this is a 256-bit vector type.
inline bool is256BitVector() const {
return isSimple() ?
(V==v8f32 || V==v4f64 || V==v32i8 || V==v16i16 || V==v8i32 ||
V==v4i64) : isExtended256BitVector();
(V==MVT::v8f32 || V==MVT::v4f64 || V==MVT::v32i8 ||
V==MVT::v16i16 || V==MVT::v8i32 || V==MVT::v4i64) :
isExtended256BitVector();
}
/// isOverloaded - Return true if this is an overloaded type for TableGen.
bool isOverloaded() const {
return (V==iAny || V==fAny || V==vAny || V==iPTRAny);
return (V==MVT::iAny || V==MVT::fAny || V==MVT::vAny || V==MVT::iPTRAny);
}
/// isByteSized - Return true if the bit size is a multiple of 8.
@ -333,115 +494,37 @@ namespace llvm {
/// getSimpleVT - Return the SimpleValueType held in the specified
/// simple EVT.
SimpleValueType getSimpleVT() const {
MVT getSimpleVT() const {
assert(isSimple() && "Expected a SimpleValueType!");
return SimpleValueType(V);
return V;
}
/// getVectorElementType - Given a vector type, return the type of
/// each element.
EVT getVectorElementType() const {
assert(isVector() && "Invalid vector type!");
switch (V) {
default:
if (isSimple())
return V.getVectorElementType();
else
return getExtendedVectorElementType();
case v2i8 :
case v4i8 :
case v8i8 :
case v16i8:
case v32i8: return i8;
case v2i16:
case v4i16:
case v8i16:
case v16i16: return i16;
case v2i32:
case v4i32:
case v8i32: return i32;
case v1i64:
case v2i64:
case v4i64: return i64;
case v2f32:
case v4f32:
case v8f32: return f32;
case v2f64:
case v4f64: return f64;
}
}
/// getVectorNumElements - Given a vector type, return the number of
/// elements it contains.
unsigned getVectorNumElements() const {
assert(isVector() && "Invalid vector type!");
switch (V) {
default:
if (isSimple())
return V.getVectorNumElements();
else
return getExtendedVectorNumElements();
case v32i8: return 32;
case v16i8:
case v16i16: return 16;
case v8i8 :
case v8i16:
case v8i32:
case v8f32: return 8;
case v4i8:
case v4i16:
case v4i32:
case v4i64:
case v4f32:
case v4f64: return 4;
case v2i8:
case v2i16:
case v2i32:
case v2i64:
case v2f32:
case v2f64: return 2;
case v1i64: return 1;
}
}
/// getSizeInBits - Return the size of the specified value type in bits.
unsigned getSizeInBits() const {
switch (V) {
case iPTR:
assert(0 && "Value type size is target-dependent. Ask TLI.");
case iPTRAny:
case iAny:
case fAny:
case vAny:
assert(0 && "Value type is overloaded.");
default:
if (isSimple())
return V.getSizeInBits();
else
return getExtendedSizeInBits();
case i1 : return 1;
case i8 : return 8;
case i16 :
case v2i8: return 16;
case f32 :
case i32 :
case v4i8:
case v2i16: return 32;
case f64 :
case i64 :
case v8i8:
case v4i16:
case v2i32:
case v1i64:
case v2f32: return 64;
case f80 : return 80;
case f128:
case ppcf128:
case i128:
case v16i8:
case v8i16:
case v4i32:
case v2i64:
case v4f32:
case v2f64: return 128;
case v32i8:
case v16i16:
case v8i32:
case v4i64:
case v8f32:
case v4f64: return 256;
}
}
/// getStoreSizeInBits - Return the number of bits overwritten by a store
@ -457,7 +540,7 @@ namespace llvm {
assert(isInteger() && !isVector() && "Invalid integer type!");
unsigned BitWidth = getSizeInBits();
if (BitWidth <= 8)
return i8;
return EVT(MVT::i8);
else
return getIntegerVT(1 << Log2_32_Ceil(BitWidth));
}
@ -495,14 +578,21 @@ namespace llvm {
/// types are returned as Other, otherwise they are invalid.
static EVT getEVT(const Type *Ty, bool HandleUnknown = false);
/// getRawBits - Represent the type as a bunch of bits.
uintptr_t getRawBits() const { return V; }
intptr_t getRawBits() {
if (V.SimpleTy <= MVT::LastSimpleValueType)
return V.SimpleTy;
else
return (intptr_t)(LLVMTy);
}
/// compareRawBits - A meaningless but well-behaved order, useful for
/// constructing containers.
struct compareRawBits {
bool operator()(EVT L, EVT R) const {
return L.getRawBits() < R.getRawBits();
if (L.V.SimpleTy == R.V.SimpleTy)
return L.LLVMTy < R.LLVMTy;
else
return L.V.SimpleTy < R.V.SimpleTy;
}
};


@ -111,8 +111,8 @@ public:
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
EVT::SimpleValueType getPointerTy() const { return PointerTy; }
EVT::SimpleValueType getShiftAmountTy() const { return ShiftAmountTy; }
MVT getPointerTy() const { return PointerTy; }
MVT getShiftAmountTy() const { return ShiftAmountTy; }
/// usesGlobalOffsetTable - Return true if this target uses a GOT for PIC
/// codegen.
@ -133,10 +133,10 @@ public:
/// getSetCCResultType - Return the ValueType of the result of SETCC
/// operations. Also used to obtain the target's preferred type for
/// the condition operand of SELECT and BRCOND nodes. In the case of
/// BRCOND the argument passed is EVT::Other since there are no other
/// BRCOND the argument passed is MVT::Other since there are no other
/// operands to get a type hint from.
virtual
EVT::SimpleValueType getSetCCResultType(EVT VT) const;
MVT::SimpleValueType getSetCCResultType(EVT VT) const;
/// getBooleanContents - For targets without i1 registers, this gives the
/// nature of the high-bits of boolean values held in types wider than i1.
@ -154,7 +154,7 @@ public:
/// specified value type. This may only be called on legal types.
TargetRegisterClass *getRegClassFor(EVT VT) const {
assert(VT.isSimple() && "getRegClassFor called on illegal type!");
TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT()];
TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];
assert(RC && "This value type is not natively supported!");
return RC;
}
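// Editor's illustrative aside (not part of the commit): getSimpleVT() now
// returns an MVT object rather than the raw enum, so every table lookup or
// piece of arithmetic on a value type goes through its SimpleTy member; that
// is the recurring mechanical change throughout this header. For example:
//   before:  TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT()];
//   after:   TargetRegisterClass *RC = RegClassForVT[VT.getSimpleVT().SimpleTy];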
@ -164,16 +164,16 @@ public:
/// holds it without promotions or expansions.
bool isTypeLegal(EVT VT) const {
assert(!VT.isSimple() ||
(unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
return VT.isSimple() && RegClassForVT[VT.getSimpleVT()] != 0;
(unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
}
class ValueTypeActionImpl {
/// ValueTypeActions - This is a bitvector that contains two bits for each
/// value type, where the two bits correspond to the LegalizeAction enum.
/// This can be queried with "getTypeAction(VT)".
/// dimension by (EVT::MAX_ALLOWED_VALUETYPE/32) * 2
uint32_t ValueTypeActions[(EVT::MAX_ALLOWED_VALUETYPE/32)*2];
/// dimension by (MVT::MAX_ALLOWED_VALUETYPE/32) * 2
uint32_t ValueTypeActions[(MVT::MAX_ALLOWED_VALUETYPE/32)*2];
public:
ValueTypeActionImpl() {
ValueTypeActions[0] = ValueTypeActions[1] = 0;
@ -197,12 +197,12 @@ public:
assert(0 && "Unsupported extended type!");
return Legal;
}
unsigned I = VT.getSimpleVT();
unsigned I = VT.getSimpleVT().SimpleTy;
assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
return (LegalizeAction)((ValueTypeActions[I>>4] >> ((2*I) & 31)) & 3);
}
void setTypeAction(EVT VT, LegalizeAction Action) {
unsigned I = VT.getSimpleVT();
unsigned I = VT.getSimpleVT().SimpleTy;
assert(I<4*array_lengthof(ValueTypeActions)*sizeof(ValueTypeActions[0]));
ValueTypeActions[I>>4] |= Action << ((I*2) & 31);
}
@ -228,8 +228,9 @@ public:
/// returns the integer type to transform to.
EVT getTypeToTransformTo(EVT VT) const {
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT() < array_lengthof(TransformToType));
EVT NVT = TransformToType[VT.getSimpleVT()];
assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(TransformToType));
EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
assert(getTypeAction(NVT) != Promote &&
"Promote may not follow Expand or Promote");
return NVT;
@ -255,7 +256,7 @@ public:
return getTypeAction(NVT) == Promote ? getTypeToTransformTo(NVT) : NVT;
}
assert(0 && "Unsupported extended type!");
return EVT(EVT::Other); // Not reached
return MVT(MVT::Other); // Not reached
}
/// getTypeToExpandTo - For types supported by the target, this is an
@ -315,7 +316,7 @@ public:
/// getWidenVectorType: given a vector type, returns the type to widen to
/// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
/// If there is no vector type that we want to widen to, returns EVT::Other
/// If there is no vector type that we want to widen to, returns MVT::Other
/// When and where to widen is target dependent based on the cost of
/// scalarizing vs using the wider vector type.
virtual EVT getWidenVectorType(EVT VT) const;
@ -353,9 +354,9 @@ public:
LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
if (VT.isExtended()) return Expand;
assert(Op < array_lengthof(OpActions[0]) &&
(unsigned)VT.getSimpleVT() < sizeof(OpActions[0][0])*8 &&
(unsigned)VT.getSimpleVT().SimpleTy < sizeof(OpActions[0][0])*8 &&
"Table isn't big enough!");
unsigned I = (unsigned) VT.getSimpleVT();
unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
unsigned J = I & 31;
I = I >> 5;
return (LegalizeAction)((OpActions[I][Op] >> (J*2) ) & 3);
@ -365,7 +366,7 @@ public:
/// legal on this target or can be made legal with custom lowering. This
/// is used to help guide high-level lowering decisions.
bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
return (VT == EVT::Other || isTypeLegal(VT)) &&
return (VT == MVT::Other || isTypeLegal(VT)) &&
(getOperationAction(Op, VT) == Legal ||
getOperationAction(Op, VT) == Custom);
}
@ -373,7 +374,7 @@ public:
/// isOperationLegal - Return true if the specified operation is legal on this
/// target.
bool isOperationLegal(unsigned Op, EVT VT) const {
return (VT == EVT::Other || isTypeLegal(VT)) &&
return (VT == MVT::Other || isTypeLegal(VT)) &&
getOperationAction(Op, VT) == Legal;
}
@ -383,9 +384,10 @@ public:
/// for it.
LegalizeAction getLoadExtAction(unsigned LType, EVT VT) const {
assert(LType < array_lengthof(LoadExtActions) &&
(unsigned)VT.getSimpleVT() < sizeof(LoadExtActions[0])*4 &&
(unsigned)VT.getSimpleVT().SimpleTy < sizeof(LoadExtActions[0])*4 &&
"Table isn't big enough!");
return (LegalizeAction)((LoadExtActions[LType] >> (2*VT.getSimpleVT())) & 3);
return (LegalizeAction)((LoadExtActions[LType] >>
(2*VT.getSimpleVT().SimpleTy)) & 3);
}
/// isLoadExtLegal - Return true if the specified load with extension is legal
@ -402,11 +404,13 @@ public:
/// expander for it.
LegalizeAction getTruncStoreAction(EVT ValVT,
EVT MemVT) const {
assert((unsigned)ValVT.getSimpleVT() < array_lengthof(TruncStoreActions) &&
(unsigned)MemVT.getSimpleVT() < sizeof(TruncStoreActions[0])*4 &&
assert((unsigned)ValVT.getSimpleVT().SimpleTy <
array_lengthof(TruncStoreActions) &&
(unsigned)MemVT.getSimpleVT().SimpleTy <
sizeof(TruncStoreActions[0])*4 &&
"Table isn't big enough!");
return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT()] >>
(2*MemVT.getSimpleVT())) & 3);
return (LegalizeAction)((TruncStoreActions[ValVT.getSimpleVT().SimpleTy] >>
(2*MemVT.getSimpleVT().SimpleTy)) & 3);
}
/// isTruncStoreLegal - Return true if the specified store with truncation is
@ -424,9 +428,10 @@ public:
LegalizeAction
getIndexedLoadAction(unsigned IdxMode, EVT VT) const {
assert( IdxMode < array_lengthof(IndexedModeActions[0][0]) &&
((unsigned)VT.getSimpleVT()) < EVT::LAST_VALUETYPE &&
((unsigned)VT.getSimpleVT().SimpleTy) < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
return (LegalizeAction)((IndexedModeActions[(unsigned)VT.getSimpleVT()][0][IdxMode]));
return (LegalizeAction)((IndexedModeActions[
(unsigned)VT.getSimpleVT().SimpleTy][0][IdxMode]));
}
/// isIndexedLoadLegal - Return true if the specified indexed load is legal
@ -444,9 +449,10 @@ public:
LegalizeAction
getIndexedStoreAction(unsigned IdxMode, EVT VT) const {
assert(IdxMode < array_lengthof(IndexedModeActions[0][1]) &&
(unsigned)VT.getSimpleVT() < EVT::LAST_VALUETYPE &&
(unsigned)VT.getSimpleVT().SimpleTy < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
return (LegalizeAction)((IndexedModeActions[(unsigned)VT.getSimpleVT()][1][IdxMode]));
return (LegalizeAction)((IndexedModeActions[
(unsigned)VT.getSimpleVT().SimpleTy][1][IdxMode]));
}
/// isIndexedStoreLegal - Return true if the specified indexed store is legal
@ -463,11 +469,13 @@ public:
/// for it.
LegalizeAction
getConvertAction(EVT FromVT, EVT ToVT) const {
assert((unsigned)FromVT.getSimpleVT() < array_lengthof(ConvertActions) &&
(unsigned)ToVT.getSimpleVT() < sizeof(ConvertActions[0])*4 &&
assert((unsigned)FromVT.getSimpleVT().SimpleTy <
array_lengthof(ConvertActions) &&
(unsigned)ToVT.getSimpleVT().SimpleTy <
sizeof(ConvertActions[0])*4 &&
"Table isn't big enough!");
return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT()] >>
(2*ToVT.getSimpleVT())) & 3);
return (LegalizeAction)((ConvertActions[FromVT.getSimpleVT().SimpleTy] >>
(2*ToVT.getSimpleVT().SimpleTy)) & 3);
}
/// isConvertLegal - Return true if the specified conversion is legal
@ -484,10 +492,10 @@ public:
LegalizeAction
getCondCodeAction(ISD::CondCode CC, EVT VT) const {
assert((unsigned)CC < array_lengthof(CondCodeActions) &&
(unsigned)VT.getSimpleVT() < sizeof(CondCodeActions[0])*4 &&
(unsigned)VT.getSimpleVT().SimpleTy < sizeof(CondCodeActions[0])*4 &&
"Table isn't big enough!");
LegalizeAction Action = (LegalizeAction)
((CondCodeActions[CC] >> (2*VT.getSimpleVT())) & 3);
((CondCodeActions[CC] >> (2*VT.getSimpleVT().SimpleTy)) & 3);
assert(Action != Promote && "Can't promote condition code!");
return Action;
}
@ -507,9 +515,9 @@ public:
"This operation isn't promoted!");
// See if this has an explicit type specified.
std::map<std::pair<unsigned, EVT::SimpleValueType>,
EVT::SimpleValueType>::const_iterator PTTI =
PromoteToType.find(std::make_pair(Op, VT.getSimpleVT()));
std::map<std::pair<unsigned, MVT::SimpleValueType>,
MVT::SimpleValueType>::const_iterator PTTI =
PromoteToType.find(std::make_pair(Op, VT.getSimpleVT().SimpleTy));
if (PTTI != PromoteToType.end()) return PTTI->second;
assert((VT.isInteger() || VT.isFloatingPoint()) &&
@ -517,8 +525,8 @@ public:
EVT NVT = VT;
do {
NVT = (EVT::SimpleValueType)(NVT.getSimpleVT()+1);
assert(NVT.isInteger() == VT.isInteger() && NVT != EVT::isVoid &&
NVT = (MVT::SimpleValueType)(NVT.getSimpleVT().SimpleTy+1);
assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
"Didn't find type to promote to!");
} while (!isTypeLegal(NVT) ||
getOperationAction(Op, NVT) == Promote);
@ -527,11 +535,11 @@ public:
/// getValueType - Return the EVT corresponding to this LLVM type.
/// This is fixed by the LLVM operations except for the pointer size. If
/// AllowUnknown is true, this will return EVT::Other for types with no EVT
/// AllowUnknown is true, this will return MVT::Other for types with no EVT
/// counterpart (e.g. structs), otherwise it will assert.
EVT getValueType(const Type *Ty, bool AllowUnknown = false) const {
EVT VT = EVT::getEVT(Ty, AllowUnknown);
return VT == EVT::iPTR ? PointerTy : VT;
return VT == MVT::iPTR ? PointerTy : VT;
}
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
@ -543,8 +551,9 @@ public:
/// eventually require.
EVT getRegisterType(EVT VT) const {
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT() < array_lengthof(RegisterTypeForVT));
return RegisterTypeForVT[VT.getSimpleVT()];
assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(RegisterTypeForVT));
return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
}
if (VT.isVector()) {
EVT VT1, RegisterVT;
@ -556,7 +565,7 @@ public:
return getRegisterType(getTypeToTransformTo(VT));
}
assert(0 && "Unsupported extended type!");
return EVT(EVT::Other); // Not reached
return EVT(MVT::Other); // Not reached
}
/// getNumRegisters - Return the number of registers that this ValueType will
@ -567,8 +576,9 @@ public:
/// type. For an i140 on a 32 bit machine this means 5 registers.
unsigned getNumRegisters(EVT VT) const {
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT() < array_lengthof(NumRegistersForVT));
return NumRegistersForVT[VT.getSimpleVT()];
assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(NumRegistersForVT));
return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
}
if (VT.isVector()) {
EVT VT1, VT2;
@ -638,7 +648,7 @@ public:
virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
bool isSrcConst, bool isSrcStr,
SelectionDAG &DAG) const {
return EVT::iAny;
return MVT::iAny;
}
/// usesUnderscoreSetJmp - Determine if we should use _setjmp or setjmp
@ -869,7 +879,7 @@ protected:
/// setShiftAmountType - Describe the type that should be used for shift
/// amounts. This type defaults to the pointer type.
void setShiftAmountType(EVT::SimpleValueType VT) { ShiftAmountTy = VT; }
void setShiftAmountType(MVT VT) { ShiftAmountTy = VT; }
/// setBooleanContents - Specify how the target extends the result of a
/// boolean value from i1 to a wider type. See getBooleanContents.
@ -933,9 +943,9 @@ protected:
/// regclass for the specified value type. This indicates the selector can
/// handle values of that class natively.
void addRegisterClass(EVT VT, TargetRegisterClass *RC) {
assert((unsigned)VT.getSimpleVT() < array_lengthof(RegClassForVT));
assert((unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
AvailableRegClasses.push_back(std::make_pair(VT, RC));
RegClassForVT[VT.getSimpleVT()] = RC;
RegClassForVT[VT.getSimpleVT().SimpleTy] = RC;
}
/// computeRegisterProperties - Once all of the register classes are added,
@ -944,9 +954,9 @@ protected:
/// setOperationAction - Indicate that the specified operation does not work
/// with the specified type and indicate what to do about it.
void setOperationAction(unsigned Op, EVT::SimpleValueType VT,
void setOperationAction(unsigned Op, MVT VT,
LegalizeAction Action) {
unsigned I = (unsigned)VT;
unsigned I = (unsigned)VT.SimpleTy;
unsigned J = I & 31;
I = I >> 5;
OpActions[I][Op] &= ~(uint64_t(3UL) << (J*2));
@ -955,80 +965,78 @@ protected:
/// setLoadExtAction - Indicate that the specified load with extension does
/// not work with the specified type and indicate what to do about it.
void setLoadExtAction(unsigned ExtType, EVT::SimpleValueType VT,
void setLoadExtAction(unsigned ExtType, MVT VT,
LegalizeAction Action) {
assert((unsigned)VT < sizeof(LoadExtActions[0])*4 &&
assert((unsigned)VT.SimpleTy < sizeof(LoadExtActions[0])*4 &&
ExtType < array_lengthof(LoadExtActions) &&
"Table isn't big enough!");
LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT*2);
LoadExtActions[ExtType] |= (uint64_t)Action << VT*2;
LoadExtActions[ExtType] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
LoadExtActions[ExtType] |= (uint64_t)Action << VT.SimpleTy*2;
}
/// setTruncStoreAction - Indicate that the specified truncating store does
/// not work with the specified type and indicate what to do about it.
void setTruncStoreAction(EVT::SimpleValueType ValVT,
EVT::SimpleValueType MemVT,
void setTruncStoreAction(MVT ValVT, MVT MemVT,
LegalizeAction Action) {
assert((unsigned)ValVT < array_lengthof(TruncStoreActions) &&
(unsigned)MemVT < sizeof(TruncStoreActions[0])*4 &&
assert((unsigned)ValVT.SimpleTy < array_lengthof(TruncStoreActions) &&
(unsigned)MemVT.SimpleTy < sizeof(TruncStoreActions[0])*4 &&
"Table isn't big enough!");
TruncStoreActions[ValVT] &= ~(uint64_t(3UL) << MemVT*2);
TruncStoreActions[ValVT] |= (uint64_t)Action << MemVT*2;
TruncStoreActions[ValVT.SimpleTy] &= ~(uint64_t(3UL) << MemVT.SimpleTy*2);
TruncStoreActions[ValVT.SimpleTy] |= (uint64_t)Action << MemVT.SimpleTy*2;
}
/// setIndexedLoadAction - Indicate that the specified indexed load does or
/// does not work with the specified type and indicate what to do about
/// it. NOTE: All indexed mode loads are initialized to Expand in
/// TargetLowering.cpp
void setIndexedLoadAction(unsigned IdxMode, EVT::SimpleValueType VT,
void setIndexedLoadAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
assert((unsigned)VT < EVT::LAST_VALUETYPE &&
assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
IdxMode < array_lengthof(IndexedModeActions[0][0]) &&
"Table isn't big enough!");
IndexedModeActions[(unsigned)VT][0][IdxMode] = (uint8_t)Action;
IndexedModeActions[(unsigned)VT.SimpleTy][0][IdxMode] = (uint8_t)Action;
}
/// setIndexedStoreAction - Indicate that the specified indexed store does or
/// does not work with the specified type and indicate what to do about
/// it. NOTE: All indexed mode stores are initialized to Expand in
/// TargetLowering.cpp
void setIndexedStoreAction(unsigned IdxMode, EVT::SimpleValueType VT,
void setIndexedStoreAction(unsigned IdxMode, MVT VT,
LegalizeAction Action) {
assert((unsigned)VT < EVT::LAST_VALUETYPE &&
assert((unsigned)VT.SimpleTy < MVT::LAST_VALUETYPE &&
IdxMode < array_lengthof(IndexedModeActions[0][1] ) &&
"Table isn't big enough!");
IndexedModeActions[(unsigned)VT][1][IdxMode] = (uint8_t)Action;
IndexedModeActions[(unsigned)VT.SimpleTy][1][IdxMode] = (uint8_t)Action;
}
/// setConvertAction - Indicate that the specified conversion does or does
/// not work with the specified type and indicate what to do about it.
void setConvertAction(EVT::SimpleValueType FromVT, EVT::SimpleValueType ToVT,
void setConvertAction(MVT FromVT, MVT ToVT,
LegalizeAction Action) {
assert((unsigned)FromVT < array_lengthof(ConvertActions) &&
(unsigned)ToVT < sizeof(ConvertActions[0])*4 &&
assert((unsigned)FromVT.SimpleTy < array_lengthof(ConvertActions) &&
(unsigned)ToVT.SimpleTy < sizeof(ConvertActions[0])*4 &&
"Table isn't big enough!");
ConvertActions[FromVT] &= ~(uint64_t(3UL) << ToVT*2);
ConvertActions[FromVT] |= (uint64_t)Action << ToVT*2;
ConvertActions[FromVT.SimpleTy] &= ~(uint64_t(3UL) << ToVT.SimpleTy*2);
ConvertActions[FromVT.SimpleTy] |= (uint64_t)Action << ToVT.SimpleTy*2;
}
/// setCondCodeAction - Indicate that the specified condition code is or isn't
/// supported on the target and indicate what to do about it.
void setCondCodeAction(ISD::CondCode CC, EVT::SimpleValueType VT,
void setCondCodeAction(ISD::CondCode CC, MVT VT,
LegalizeAction Action) {
assert((unsigned)VT < sizeof(CondCodeActions[0])*4 &&
assert((unsigned)VT.SimpleTy < sizeof(CondCodeActions[0])*4 &&
(unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT*2);
CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT*2;
CondCodeActions[(unsigned)CC] &= ~(uint64_t(3UL) << VT.SimpleTy*2);
CondCodeActions[(unsigned)CC] |= (uint64_t)Action << VT.SimpleTy*2;
}
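// Editor's illustrative aside (hypothetical target code, not part of this
// commit): the protected setters above now take MVT values directly, so a
// target's TargetLowering subclass keeps its familiar configuration style,
// for example:
//   setOperationAction(ISD::FSIN, MVT::f64, Expand);
//   setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
//   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
//   setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);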
/// AddPromotedToType - If Opc/OrigVT is specified as being promoted, the
/// promotion code defaults to trying a larger integer/fp until it can find
/// one that works. If that default is insufficient, this method can be used
/// by the target to override the default.
void AddPromotedToType(unsigned Opc, EVT::SimpleValueType OrigVT,
EVT::SimpleValueType DestVT) {
PromoteToType[std::make_pair(Opc, OrigVT)] = DestVT;
void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
}
/// addLegalFPImmediate - Indicate that this target can instruction select
@ -1355,7 +1363,7 @@ public:
AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
: InlineAsm::ConstraintInfo(info),
ConstraintType(TargetLowering::C_Unknown),
CallOperandVal(0), ConstraintVT(EVT::Other) {
CallOperandVal(0), ConstraintVT(MVT::Other) {
}
};
@ -1527,7 +1535,7 @@ private:
/// PointerTy - The type to use for pointers, usually i32 or i64.
///
EVT::SimpleValueType PointerTy;
MVT PointerTy;
/// IsLittleEndian - True if this is a little endian target.
///
@ -1562,7 +1570,7 @@ private:
/// ShiftAmountTy - The type to use for shift amounts, usually i8 or whatever
/// PointerTy is.
EVT::SimpleValueType ShiftAmountTy;
MVT ShiftAmountTy;
/// BooleanContents - Information about the contents of the high-bits in
/// boolean values held in a type wider than i1. See getBooleanContents.
@ -1608,16 +1616,16 @@ private:
/// RegClassForVT - This indicates the default register class to use for
/// each ValueType the target supports natively.
TargetRegisterClass *RegClassForVT[EVT::LAST_VALUETYPE];
unsigned char NumRegistersForVT[EVT::LAST_VALUETYPE];
EVT RegisterTypeForVT[EVT::LAST_VALUETYPE];
TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
EVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
/// TransformToType - For any value types we are promoting or expanding, this
/// contains the value type that we are changing to. For Expanded types, this
/// contains one step of the expand (e.g. i64 -> i32), even if there are
/// multiple steps required (e.g. i64 -> i16). For types natively supported
/// by the system, this holds the same type (e.g. i32 -> i32).
EVT TransformToType[EVT::LAST_VALUETYPE];
EVT TransformToType[MVT::LAST_VALUETYPE];
/// OpActions - For each operation and each value type, keep a LegalizeAction
/// that indicates how instruction selection should deal with the operation.
@ -1625,8 +1633,8 @@ private:
/// operations that are not should be described. Note that operations on
/// non-legal value types are not described here.
/// This array is accessed using VT.getSimpleVT(), so it is subject to
/// the EVT::MAX_ALLOWED_VALUETYPE * 2 bits.
uint64_t OpActions[EVT::MAX_ALLOWED_VALUETYPE/(sizeof(uint64_t)*4)][ISD::BUILTIN_OP_END];
/// the MVT::MAX_ALLOWED_VALUETYPE * 2 bits.
uint64_t OpActions[MVT::MAX_ALLOWED_VALUETYPE/(sizeof(uint64_t)*4)][ISD::BUILTIN_OP_END];
/// LoadExtActions - For each load extension type and each value type,
/// keep a LegalizeAction that indicates how instruction selection should deal
@ -1635,7 +1643,7 @@ private:
/// TruncStoreActions - For each truncating store, keep a LegalizeAction that
/// indicates how instruction selection should deal with the store.
uint64_t TruncStoreActions[EVT::LAST_VALUETYPE];
uint64_t TruncStoreActions[MVT::LAST_VALUETYPE];
/// IndexedModeActions - For each indexed mode and each value type,
/// keep a pair of LegalizeAction that indicates how instruction
@ -1643,14 +1651,14 @@ private:
/// dimension is now the value_type for the reference. The second
/// dimension is the load [0] vs. store[1]. The third dimension
/// represents the various modes for load store.
uint8_t IndexedModeActions[EVT::LAST_VALUETYPE][2][ISD::LAST_INDEXED_MODE];
uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][2][ISD::LAST_INDEXED_MODE];
/// ConvertActions - For each conversion from source type to destination type,
/// keep a LegalizeAction that indicates how instruction selection should
/// deal with the conversion.
/// Currently, this is used only for floating->floating conversions
/// (FP_EXTEND and FP_ROUND).
uint64_t ConvertActions[EVT::LAST_VALUETYPE];
uint64_t ConvertActions[MVT::LAST_VALUETYPE];
/// CondCodeActions - For each condition code (ISD::CondCode) keep a
/// LegalizeAction that indicates how instruction selection should
@ -1675,7 +1683,7 @@ private:
///
/// Targets add entries to this map with AddPromotedToType(..), clients access
/// this with getTypeToPromoteTo(..).
std::map<std::pair<unsigned, EVT::SimpleValueType>, EVT::SimpleValueType>
std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
PromoteToType;
/// LibcallRoutineNames - Stores the name of each libcall.


@ -118,7 +118,7 @@ public:
/// hasType - return true if this TargetRegisterClass has the ValueType vt.
///
bool hasType(EVT vt) const {
for(int i = 0; VTs[i] != EVT::Other; ++i)
for(int i = 0; VTs[i].getSimpleVT().SimpleTy != MVT::Other; ++i)
if (VTs[i] == vt)
return true;
return false;
@ -132,7 +132,7 @@ public:
vt_iterator vt_end() const {
vt_iterator I = VTs;
while (*I != EVT::Other) ++I;
while (I->getSimpleVT().SimpleTy != MVT::Other) ++I;
return I;
}
@ -321,7 +321,7 @@ public:
/// register of the given type. If type is EVT::Other, then just return any
/// register class the register belongs to.
virtual const TargetRegisterClass *
getPhysicalRegisterRegClass(unsigned Reg, EVT VT = EVT::Other) const;
getPhysicalRegisterRegClass(unsigned Reg, EVT VT = MVT::Other) const;
/// getAllocatableSet - Returns a bitset indexed by register number
/// indicating if a register is allocatable or not. If a register class is


@ -512,48 +512,48 @@ def zextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
}]>;
def extloadi1 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i1;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
}]>;
def extloadi8 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i8;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def extloadi16 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i16;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def extloadi32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i32;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def extloadf32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::f32;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32;
}]>;
def extloadf64 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::f64;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64;
}]>;
def sextloadi1 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i1;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
}]>;
def sextloadi8 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i8;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def sextloadi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i16;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def sextloadi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i32;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def zextloadi1 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i1;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
}]>;
def zextloadi8 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i8;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i16;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
return cast<LoadSDNode>(N)->getMemoryVT() == EVT::i32;
return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
// store fragments.
@ -573,23 +573,23 @@ def truncstore : PatFrag<(ops node:$val, node:$ptr),
}]>;
def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
(truncstore node:$val, node:$ptr), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i8;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
(truncstore node:$val, node:$ptr), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i16;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
(truncstore node:$val, node:$ptr), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i32;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
(truncstore node:$val, node:$ptr), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::f32;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
}]>;
def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
(truncstore node:$val, node:$ptr), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::f64;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64;
}]>;
// indexed store fragments.
@ -615,23 +615,23 @@ def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
}]>;
def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
(pre_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i1;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
}]>;
def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
(pre_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i8;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
(pre_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i16;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
(pre_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i32;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
(pre_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::f32;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
}]>;
def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
@ -647,23 +647,23 @@ def post_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
}]>;
def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
(post_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i1;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
}]>;
def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
(post_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i8;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
(post_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i16;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
(post_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::i32;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
(post_truncst node:$val, node:$base, node:$offset), [{
return cast<StoreSDNode>(N)->getMemoryVT() == EVT::f32;
return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
}]>;
// setcc convenience fragments.
@ -711,40 +711,40 @@ def setne : PatFrag<(ops node:$lhs, node:$rhs),
def atomic_cmp_swap_8 :
PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == EVT::i8;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def atomic_cmp_swap_16 :
PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == EVT::i16;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def atomic_cmp_swap_32 :
PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == EVT::i32;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def atomic_cmp_swap_64 :
PatFrag<(ops node:$ptr, node:$cmp, node:$swap),
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swap), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == EVT::i64;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;
multiclass binary_atomic_op<SDNode atomic_op> {
def _8 : PatFrag<(ops node:$ptr, node:$val),
(atomic_op node:$ptr, node:$val), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == EVT::i8;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;
def _16 : PatFrag<(ops node:$ptr, node:$val),
(atomic_op node:$ptr, node:$val), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == EVT::i16;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;
def _32 : PatFrag<(ops node:$ptr, node:$val),
(atomic_op node:$ptr, node:$val), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == EVT::i32;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;
def _64 : PatFrag<(ops node:$ptr, node:$val),
(atomic_op node:$ptr, node:$val), [{
return cast<AtomicSDNode>(N)->getMemoryVT() == EVT::i64;
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;
}
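
The fragments above are representative of the whole .td change: each predicate compares the node's EVT-valued getMemoryVT() against a simple-type enumerator, and those enumerators now live on MVT. A minimal free-standing sketch of the same check, assuming (as these one-line predicates do) that a simple MVT can be compared directly against the returned EVT; the helper name is illustrative, not part of this patch:

static bool isAtomicOfWidth(SDNode *N, MVT Width) {
  // getMemoryVT() reports the width actually touched in memory; comparing it
  // against a simple MVT such as MVT::i32 mirrors the _32 fragment above.
  return cast<AtomicSDNode>(N)->getMemoryVT() == Width;
}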


@ -331,7 +331,7 @@ CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
static char isNegatibleForFree(SDValue Op, bool LegalOperations,
unsigned Depth = 0) {
// No compile time optimizations on this type.
if (Op.getValueType() == EVT::ppcf128)
if (Op.getValueType() == MVT::ppcf128)
return 0;
// fneg is removable even if it has multiple uses.
@ -833,12 +833,12 @@ SDValue DAGCombiner::combine(SDNode *N) {
/// otherwise return a null sd operand.
static SDValue getInputChainForNode(SDNode *N) {
if (unsigned NumOps = N->getNumOperands()) {
if (N->getOperand(0).getValueType() == EVT::Other)
if (N->getOperand(0).getValueType() == MVT::Other)
return N->getOperand(0);
else if (N->getOperand(NumOps-1).getValueType() == EVT::Other)
else if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
return N->getOperand(NumOps-1);
for (unsigned i = 1; i < NumOps-1; ++i)
if (N->getOperand(i).getValueType() == EVT::Other)
if (N->getOperand(i).getValueType() == MVT::Other)
return N->getOperand(i);
}
return SDValue();
@ -911,7 +911,7 @@ SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
} else {
// New and improved token factor.
Result = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
EVT::Other, &Ops[0], Ops.size());
MVT::Other, &Ops[0], Ops.size());
}
// Don't add users to work list.
@ -1093,7 +1093,7 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
if (N->hasNUsesOfValue(0, 1))
return CombineTo(N, DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N1, N0),
DAG.getNode(ISD::CARRY_FALSE,
N->getDebugLoc(), EVT::Flag));
N->getDebugLoc(), MVT::Flag));
// canonicalize constant to RHS.
if (N0C && !N1C)
@ -1102,7 +1102,7 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
// fold (addc x, 0) -> x + no carry out
if (N1C && N1C->isNullValue())
return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE,
N->getDebugLoc(), EVT::Flag));
N->getDebugLoc(), MVT::Flag));
// fold (addc a, b) -> (or a, b), CARRY_FALSE iff a and b share no bits.
APInt LHSZero, LHSOne;
@ -1119,7 +1119,7 @@ SDValue DAGCombiner::visitADDC(SDNode *N) {
(LHSZero & (~RHSZero & Mask)) == (~RHSZero & Mask))
return CombineTo(N, DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N1),
DAG.getNode(ISD::CARRY_FALSE,
N->getDebugLoc(), EVT::Flag));
N->getDebugLoc(), MVT::Flag));
}
return SDValue();
@ -1878,7 +1878,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
LN0->isUnindexed() && N0.hasOneUse() &&
// Do not change the width of a volatile load.
!LN0->isVolatile()) {
EVT ExtVT = EVT::Other;
EVT ExtVT = MVT::Other;
uint32_t ActiveBits = N1C->getAPIntValue().getActiveBits();
if (ActiveBits > 0 && APIntOps::isMask(ActiveBits, N1C->getAPIntValue()))
ExtVT = EVT::getIntegerVT(ActiveBits);
@ -1887,7 +1887,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// Do not generate loads of non-round integer types since these can
// be expensive (and would be wrong if the type is not byte sized).
if (ExtVT != EVT::Other && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() &&
if (ExtVT != MVT::Other && LoadedVT.bitsGT(ExtVT) && ExtVT.isRound() &&
(!LegalOperations || TLI.isLoadExtLegal(ISD::ZEXTLOAD, ExtVT))) {
EVT PtrType = N0.getOperand(1).getValueType();
@ -2289,7 +2289,7 @@ SDValue DAGCombiner::visitXOR(SDNode *N) {
}
// fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc
if (N1C && N1C->getAPIntValue() == 1 && VT == EVT::i1 &&
if (N1C && N1C->getAPIntValue() == 1 && VT == MVT::i1 &&
(N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) {
SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1);
if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) {
@ -2793,11 +2793,11 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
if (N0C && N0C->isNullValue())
return N2;
// fold (select C, 1, X) -> (or C, X)
if (VT == EVT::i1 && N1C && N1C->getAPIntValue() == 1)
if (VT == MVT::i1 && N1C && N1C->getAPIntValue() == 1)
return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
// fold (select C, 0, 1) -> (xor C, 1)
if (VT.isInteger() &&
(VT0 == EVT::i1 ||
(VT0 == MVT::i1 ||
(VT0.isInteger() &&
TLI.getBooleanContents() == TargetLowering::ZeroOrOneBooleanContent)) &&
N1C && N2C && N1C->isNullValue() && N2C->getAPIntValue() == 1) {
@ -2813,27 +2813,27 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
return DAG.getNode(ISD::TRUNCATE, N->getDebugLoc(), VT, XORNode);
}
// fold (select C, 0, X) -> (and (not C), X)
if (VT == VT0 && VT == EVT::i1 && N1C && N1C->isNullValue()) {
if (VT == VT0 && VT == MVT::i1 && N1C && N1C->isNullValue()) {
SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
AddToWorkList(NOTNode.getNode());
return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, NOTNode, N2);
}
// fold (select C, X, 1) -> (or (not C), X)
if (VT == VT0 && VT == EVT::i1 && N2C && N2C->getAPIntValue() == 1) {
if (VT == VT0 && VT == MVT::i1 && N2C && N2C->getAPIntValue() == 1) {
SDValue NOTNode = DAG.getNOT(N0.getDebugLoc(), N0, VT);
AddToWorkList(NOTNode.getNode());
return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, NOTNode, N1);
}
// fold (select C, X, 0) -> (and C, X)
if (VT == EVT::i1 && N2C && N2C->isNullValue())
if (VT == MVT::i1 && N2C && N2C->isNullValue())
return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);
// fold (select X, X, Y) -> (or X, Y)
// fold (select X, 1, Y) -> (or X, Y)
if (VT == EVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1)))
if (VT == MVT::i1 && (N0 == N1 || (N1C && N1C->getAPIntValue() == 1)))
return DAG.getNode(ISD::OR, N->getDebugLoc(), VT, N0, N2);
// fold (select X, Y, X) -> (and X, Y)
// fold (select X, Y, 0) -> (and X, Y)
if (VT == EVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0)))
if (VT == MVT::i1 && (N0 == N2 || (N2C && N2C->getAPIntValue() == 0)))
return DAG.getNode(ISD::AND, N->getDebugLoc(), VT, N0, N1);
// If we can fold this based on the true/false value, do so.
@ -2843,10 +2843,10 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
// fold selects based on a setcc into other things, such as min/max/abs
if (N0.getOpcode() == ISD::SETCC) {
// FIXME:
// Check against EVT::Other for SELECT_CC, which is a workaround for targets
// Check against MVT::Other for SELECT_CC, which is a workaround for targets
// having to say they don't support SELECT_CC on every type the DAG knows
// about, since there is no way to mark an opcode illegal at all value types
if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, EVT::Other) &&
if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, MVT::Other) &&
TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT))
return DAG.getNode(ISD::SELECT_CC, N->getDebugLoc(), VT,
N0.getOperand(0), N0.getOperand(1),
@ -3887,7 +3887,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
if (SrcEltVT.isFloatingPoint()) {
// Convert the input float vector to an int vector where the elements are the
// same size.
assert((SrcEltVT == EVT::f32 || SrcEltVT == EVT::f64) && "Unknown FP VT!");
assert((SrcEltVT == MVT::f32 || SrcEltVT == MVT::f64) && "Unknown FP VT!");
EVT IntVT = EVT::getIntegerVT(SrcEltVT.getSizeInBits());
BV = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, IntVT).getNode();
SrcEltVT = IntVT;
@ -3896,7 +3896,7 @@ ConstantFoldBIT_CONVERTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
// Now we know the input is an integer vector. If the output is a FP type,
// convert to integer first, then to FP of the right size.
if (DstEltVT.isFloatingPoint()) {
assert((DstEltVT == EVT::f32 || DstEltVT == EVT::f64) && "Unknown FP VT!");
assert((DstEltVT == MVT::f32 || DstEltVT == MVT::f64) && "Unknown FP VT!");
EVT TmpVT = EVT::getIntegerVT(DstEltVT.getSizeInBits());
SDNode *Tmp = ConstantFoldBIT_CONVERTofBUILD_VECTOR(BV, TmpVT).getNode();
@ -3988,7 +3988,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
}
// fold (fadd c1, c2) -> c1+c2
if (N0CFP && N1CFP && VT != EVT::ppcf128)
if (N0CFP && N1CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FADD, N->getDebugLoc(), VT, N0, N1);
// canonicalize constant to RHS
if (N0CFP && !N1CFP)
@ -4029,7 +4029,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
}
// fold (fsub c1, c2) -> c1-c2
if (N0CFP && N1CFP && VT != EVT::ppcf128)
if (N0CFP && N1CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FSUB, N->getDebugLoc(), VT, N0, N1);
// fold (fsub A, 0) -> A
if (UnsafeFPMath && N1CFP && N1CFP->getValueAPF().isZero())
@ -4063,7 +4063,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
}
// fold (fmul c1, c2) -> c1*c2
if (N0CFP && N1CFP && VT != EVT::ppcf128)
if (N0CFP && N1CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FMUL, N->getDebugLoc(), VT, N0, N1);
// canonicalize constant to RHS
if (N0CFP && !N1CFP)
@ -4118,7 +4118,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
}
// fold (fdiv c1, c2) -> c1/c2
if (N0CFP && N1CFP && VT != EVT::ppcf128)
if (N0CFP && N1CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FDIV, N->getDebugLoc(), VT, N0, N1);
@ -4145,7 +4145,7 @@ SDValue DAGCombiner::visitFREM(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (frem c1, c2) -> fmod(c1,c2)
if (N0CFP && N1CFP && VT != EVT::ppcf128)
if (N0CFP && N1CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FREM, N->getDebugLoc(), VT, N0, N1);
return SDValue();
@ -4158,7 +4158,7 @@ SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) {
ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1);
EVT VT = N->getValueType(0);
if (N0CFP && N1CFP && VT != EVT::ppcf128) // Constant fold
if (N0CFP && N1CFP && VT != MVT::ppcf128) // Constant fold
return DAG.getNode(ISD::FCOPYSIGN, N->getDebugLoc(), VT, N0, N1);
if (N1CFP) {
@ -4208,7 +4208,7 @@ SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) {
EVT OpVT = N0.getValueType();
// fold (sint_to_fp c1) -> c1fp
if (N0C && OpVT != EVT::ppcf128)
if (N0C && OpVT != MVT::ppcf128)
return DAG.getNode(ISD::SINT_TO_FP, N->getDebugLoc(), VT, N0);
// If the input is a legal type, and SINT_TO_FP is not legal on this target,
@ -4230,7 +4230,7 @@ SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) {
EVT OpVT = N0.getValueType();
// fold (uint_to_fp c1) -> c1fp
if (N0C && OpVT != EVT::ppcf128)
if (N0C && OpVT != MVT::ppcf128)
return DAG.getNode(ISD::UINT_TO_FP, N->getDebugLoc(), VT, N0);
// If the input is a legal type, and UINT_TO_FP is not legal on this target,
@ -4263,7 +4263,7 @@ SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (fp_to_uint c1fp) -> c1
if (N0CFP && VT != EVT::ppcf128)
if (N0CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FP_TO_UINT, N->getDebugLoc(), VT, N0);
return SDValue();
@ -4276,7 +4276,7 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (fp_round c1fp) -> c1fp
if (N0CFP && N0.getValueType() != EVT::ppcf128)
if (N0CFP && N0.getValueType() != MVT::ppcf128)
return DAG.getNode(ISD::FP_ROUND, N->getDebugLoc(), VT, N0, N1);
// fold (fp_round (fp_extend x)) -> x
@ -4330,7 +4330,7 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
return SDValue();
// fold (fp_extend c1fp) -> c1fp
if (N0CFP && VT != EVT::ppcf128)
if (N0CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FP_EXTEND, N->getDebugLoc(), VT, N0);
// Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the
@ -4398,7 +4398,7 @@ SDValue DAGCombiner::visitFABS(SDNode *N) {
EVT VT = N->getValueType(0);
// fold (fabs c1) -> fabs(c1)
if (N0CFP && VT != EVT::ppcf128)
if (N0CFP && VT != MVT::ppcf128)
return DAG.getNode(ISD::FABS, N->getDebugLoc(), VT, N0);
// fold (fabs (fabs x)) -> (fabs x)
if (N0.getOpcode() == ISD::FABS)
@ -4438,12 +4438,12 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
return Chain;
// unconditional branch
if (N1C && N1C->getAPIntValue() == 1)
return DAG.getNode(ISD::BR, N->getDebugLoc(), EVT::Other, Chain, N2);
return DAG.getNode(ISD::BR, N->getDebugLoc(), MVT::Other, Chain, N2);
// fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal
// on the target.
if (N1.getOpcode() == ISD::SETCC &&
TLI.isOperationLegalOrCustom(ISD::BR_CC, EVT::Other)) {
return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), EVT::Other,
TLI.isOperationLegalOrCustom(ISD::BR_CC, MVT::Other)) {
return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
Chain, N1.getOperand(2),
N1.getOperand(0), N1.getOperand(1), N2);
}
@ -4491,7 +4491,7 @@ SDValue DAGCombiner::visitBRCOND(SDNode *N) {
removeFromWorkList(N1.getNode());
DAG.DeleteNode(N1.getNode());
return DAG.getNode(ISD::BRCOND, N->getDebugLoc(),
EVT::Other, Chain, SetCC, N2);
MVT::Other, Chain, SetCC, N2);
}
}
}
@ -4516,7 +4516,7 @@ SDValue DAGCombiner::visitBR_CC(SDNode *N) {
// fold br_cc true, dest -> br dest (unconditional branch)
if (SCCC && !SCCC->isNullValue())
return DAG.getNode(ISD::BR, N->getDebugLoc(), EVT::Other,
return DAG.getNode(ISD::BR, N->getDebugLoc(), MVT::Other,
N->getOperand(0), N->getOperand(4));
// fold br_cc false, dest -> unconditional fall through
if (SCCC && SCCC->isNullValue())
@ -4524,7 +4524,7 @@ SDValue DAGCombiner::visitBR_CC(SDNode *N) {
// fold to a simpler setcc
if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC)
return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), EVT::Other,
return DAG.getNode(ISD::BR_CC, N->getDebugLoc(), MVT::Other,
N->getOperand(0), Simp.getOperand(2),
Simp.getOperand(0), Simp.getOperand(1),
N->getOperand(4));
@ -4859,7 +4859,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
// the updated indexed value in case of indexed loads), change uses of the
// chain value into uses of the chain input (i.e. delete the dead load).
if (!LD->isVolatile()) {
if (N->getValueType(1) == EVT::Other) {
if (N->getValueType(1) == MVT::Other) {
// Unindexed loads.
if (N->hasNUsesOfValue(0, 0)) {
// It's not safe to use the two value CombineTo variant here. e.g.
@ -4883,7 +4883,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
}
} else {
// Indexed loads.
assert(N->getValueType(2) == EVT::Other && "Malformed indexed loads?");
assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?");
if (N->hasNUsesOfValue(0, 0) && N->hasNUsesOfValue(0, 1)) {
SDValue Undef = DAG.getUNDEF(N->getValueType(0));
DOUT << "\nReplacing.6 "; DEBUG(N->dump(&DAG));
@ -4942,7 +4942,7 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
// Create token factor to keep old chain connected.
SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
EVT::Other, Chain, ReplLoad.getValue(1));
MVT::Other, Chain, ReplLoad.getValue(1));
// Replace uses with load result and token factor. Don't add users
// to work list.
@ -5096,42 +5096,42 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
// transform should not be done in this case.
if (Value.getOpcode() != ISD::TargetConstantFP) {
SDValue Tmp;
switch (CFP->getValueType(0).getSimpleVT()) {
switch (CFP->getValueType(0).getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unknown FP type");
case EVT::f80: // We don't do this for these yet.
case EVT::f128:
case EVT::ppcf128:
case MVT::f80: // We don't do this for these yet.
case MVT::f128:
case MVT::ppcf128:
break;
case EVT::f32:
if (((TLI.isTypeLegal(EVT::i32) || !LegalTypes) && !LegalOperations &&
case MVT::f32:
if (((TLI.isTypeLegal(MVT::i32) || !LegalTypes) && !LegalOperations &&
!ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, EVT::i32)) {
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
bitcastToAPInt().getZExtValue(), EVT::i32);
bitcastToAPInt().getZExtValue(), MVT::i32);
return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
Ptr, ST->getSrcValue(),
ST->getSrcValueOffset(), ST->isVolatile(),
ST->getAlignment());
}
break;
case EVT::f64:
if (((TLI.isTypeLegal(EVT::i64) || !LegalTypes) && !LegalOperations &&
case MVT::f64:
if (((TLI.isTypeLegal(MVT::i64) || !LegalTypes) && !LegalOperations &&
!ST->isVolatile()) ||
TLI.isOperationLegalOrCustom(ISD::STORE, EVT::i64)) {
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
getZExtValue(), EVT::i64);
getZExtValue(), MVT::i64);
return DAG.getStore(Chain, N->getDebugLoc(), Tmp,
Ptr, ST->getSrcValue(),
ST->getSrcValueOffset(), ST->isVolatile(),
ST->getAlignment());
} else if (!ST->isVolatile() &&
TLI.isOperationLegalOrCustom(ISD::STORE, EVT::i32)) {
TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
// Many FP stores are not made apparent until after legalize, e.g. for
// argument passing. Since this is so common, custom legalize the
// 64-bit integer store into two 32-bit stores.
uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, EVT::i32);
SDValue Hi = DAG.getConstant(Val >> 32, EVT::i32);
SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, MVT::i32);
SDValue Hi = DAG.getConstant(Val >> 32, MVT::i32);
if (TLI.isBigEndian()) std::swap(Lo, Hi);
int SVOffset = ST->getSrcValueOffset();
@ -5149,7 +5149,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
SDValue St1 = DAG.getStore(Chain, ST->getDebugLoc(), Hi,
Ptr, ST->getSrcValue(),
SVOffset, isVolatile, Alignment);
return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), EVT::Other,
return DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
St0, St1);
}
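
The switch above shows a recurring non-mechanical part of the rename: MVT is now a struct wrapping the SimpleValueType enum, so code that used to switch directly on getSimpleVT() has to reach through .SimpleTy to get a value the switch can consume. A minimal sketch of the pattern, with an illustrative helper that is not part of this patch:

static unsigned fpStoreWidthInBits(EVT VT) {
  // VT is assumed simple here; .SimpleTy exposes the raw enum for the switch.
  switch (VT.getSimpleVT().SimpleTy) {
  default: llvm_unreachable("Unknown FP type");
  case MVT::f32: return 32;
  case MVT::f64: return 64;
  }
}
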
@ -5179,7 +5179,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
// Create token to keep both nodes around.
SDValue Token = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(),
EVT::Other, Chain, ReplStore);
MVT::Other, Chain, ReplStore);
// Don't add users to work list.
return CombineTo(N, Token, false);
@ -5932,7 +5932,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
N2.getValueType(), SCC);
} else {
SCC = DAG.getSetCC(N0.getDebugLoc(), EVT::i1, N0, N1, CC);
SCC = DAG.getSetCC(N0.getDebugLoc(), MVT::i1, N0, N1, CC);
Temp = DAG.getNode(ISD::ZERO_EXTEND, N2.getDebugLoc(),
N2.getValueType(), SCC);
}
@ -6239,7 +6239,7 @@ SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) {
}
// Construct a custom tailored token factor.
SDValue NewChain = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), EVT::Other,
SDValue NewChain = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
&Aliases[0], Aliases.size());
// Make sure the old chain gets cleaned up.
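
Chain values keep the MVT::Other type, and the combiner repeatedly rebuilds them with TokenFactor nodes of that type. A condensed sketch of the idiom used throughout this file (N and DAG as in the surrounding code, and assuming at least one chain operand is found):

SmallVector<SDValue, 8> Chains;
// Operands of type MVT::Other are chains; collect them...
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
  if (N->getOperand(i).getValueType() == MVT::Other)
    Chains.push_back(N->getOperand(i));
// ...and tie them back together with a single chain-typed node.
SDValue NewChain = DAG.getNode(ISD::TokenFactor, N->getDebugLoc(), MVT::Other,
                               &Chains[0], Chains.size());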


@ -65,10 +65,10 @@ unsigned FastISel::getRegForValue(Value *V) {
// Ignore illegal types. We must do this before looking up the value
// in ValueMap because Arguments are given virtual registers regardless
// of whether FastISel can handle them.
EVT::SimpleValueType VT = RealVT.getSimpleVT();
MVT VT = RealVT.getSimpleVT();
if (!TLI.isTypeLegal(VT)) {
// Promote EVT::i1 to a legal type though, because it's common and easy.
if (VT == EVT::i1)
// Promote MVT::i1 to a legal type though, because it's common and easy.
if (VT == MVT::i1)
VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
else
return 0;
@ -190,7 +190,7 @@ unsigned FastISel::getRegForGEPIndex(Value *Idx) {
///
bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
if (VT == EVT::Other || !VT.isSimple())
if (VT == MVT::Other || !VT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
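
This check is where the MVT/EVT division of labor is most visible in FastISel: EVT::getEVT can describe any IR type, including extended ones, but the fast path only proceeds once the type is known to be a simple MVT. A condensed sketch combining the getRegForValue and SelectBinaryOp checks above (V and TLI as in the surrounding code):

EVT RealVT = EVT::getEVT(V->getType(), /*HandleUnknown=*/true);
if (RealVT == MVT::Other || !RealVT.isSimple())
  return false;                         // extended or unknown IR type: bail out
MVT VT = RealVT.getSimpleVT();          // from here on, only simple types
if (!TLI.isTypeLegal(VT)) {
  if (VT != MVT::i1)
    return false;                       // only the easy i1 case gets promoted
  VT = TLI.getTypeToTransformTo(VT).getSimpleVT();
}
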
@ -199,9 +199,9 @@ bool FastISel::SelectBinaryOp(User *I, ISD::NodeType ISDOpcode) {
// under the assumption that i64 won't be used if the target doesn't
// support it.
if (!TLI.isTypeLegal(VT)) {
// EVT::i1 is special. Allow AND, OR, or XOR because they
// MVT::i1 is special. Allow AND, OR, or XOR because they
// don't require additional zeroing, which makes them easy.
if (VT == EVT::i1 &&
if (VT == MVT::i1 &&
(ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
ISDOpcode == ISD::XOR))
VT = TLI.getTypeToTransformTo(VT);
@ -261,7 +261,7 @@ bool FastISel::SelectGetElementPtr(User *I) {
return false;
const Type *Ty = I->getOperand(0)->getType();
EVT::SimpleValueType VT = TLI.getPointerTy();
MVT VT = TLI.getPointerTy();
for (GetElementPtrInst::op_iterator OI = I->op_begin()+1, E = I->op_end();
OI != E; ++OI) {
Value *Idx = *OI;
@ -457,7 +457,7 @@ bool FastISel::SelectCall(User *I) {
default: break;
case TargetLowering::Expand: {
EVT VT = (IID == Intrinsic::eh_selector_i32 ?
EVT::i32 : EVT::i64);
MVT::i32 : MVT::i64);
if (MMI) {
if (MBB->isLandingPad())
@ -497,8 +497,8 @@ bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType());
if (SrcVT == EVT::Other || !SrcVT.isSimple() ||
DstVT == EVT::Other || !DstVT.isSimple())
if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
DstVT == MVT::Other || !DstVT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
@ -506,7 +506,7 @@ bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
// it may be i1 if we're doing a truncate because that's
// easy and somewhat common.
if (!TLI.isTypeLegal(DstVT))
if (DstVT != EVT::i1 || Opcode != ISD::TRUNCATE)
if (DstVT != MVT::i1 || Opcode != ISD::TRUNCATE)
// Unhandled type. Halt "fast" selection and bail.
return false;
@ -514,7 +514,7 @@ bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
// it may be i1 if we're doing zero-extension because that's
// easy and somewhat common.
if (!TLI.isTypeLegal(SrcVT))
if (SrcVT != EVT::i1 || Opcode != ISD::ZERO_EXTEND)
if (SrcVT != MVT::i1 || Opcode != ISD::ZERO_EXTEND)
// Unhandled type. Halt "fast" selection and bail.
return false;
@ -524,14 +524,14 @@ bool FastISel::SelectCast(User *I, ISD::NodeType Opcode) {
return false;
// If the operand is i1, arrange for the high bits in the register to be zero.
if (SrcVT == EVT::i1) {
if (SrcVT == MVT::i1) {
SrcVT = TLI.getTypeToTransformTo(SrcVT);
InputReg = FastEmitZExtFromI1(SrcVT.getSimpleVT(), InputReg);
if (!InputReg)
return false;
}
// If the result is i1, truncate to the target's type for i1 first.
if (DstVT == EVT::i1)
if (DstVT == MVT::i1)
DstVT = TLI.getTypeToTransformTo(DstVT);
unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
@ -559,8 +559,8 @@ bool FastISel::SelectBitCast(User *I) {
EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
EVT DstVT = TLI.getValueType(I->getType());
if (SrcVT == EVT::Other || !SrcVT.isSimple() ||
DstVT == EVT::Other || !DstVT.isSimple() ||
if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
DstVT == MVT::Other || !DstVT.isSimple() ||
!TLI.isTypeLegal(SrcVT) || !TLI.isTypeLegal(DstVT))
// Unhandled type. Halt "fast" selection and bail.
return false;
@ -759,45 +759,44 @@ FastISel::FastISel(MachineFunction &mf,
FastISel::~FastISel() {}
unsigned FastISel::FastEmit_(EVT::SimpleValueType, EVT::SimpleValueType,
unsigned FastISel::FastEmit_(MVT, MVT,
ISD::NodeType) {
return 0;
}
unsigned FastISel::FastEmit_r(EVT::SimpleValueType, EVT::SimpleValueType,
unsigned FastISel::FastEmit_r(MVT, MVT,
ISD::NodeType, unsigned /*Op0*/) {
return 0;
}
unsigned FastISel::FastEmit_rr(EVT::SimpleValueType, EVT::SimpleValueType,
unsigned FastISel::FastEmit_rr(MVT, MVT,
ISD::NodeType, unsigned /*Op0*/,
unsigned /*Op1*/) {
return 0;
}
unsigned FastISel::FastEmit_i(EVT::SimpleValueType, EVT::SimpleValueType,
ISD::NodeType, uint64_t /*Imm*/) {
unsigned FastISel::FastEmit_i(MVT, MVT, ISD::NodeType, uint64_t /*Imm*/) {
return 0;
}
unsigned FastISel::FastEmit_f(EVT::SimpleValueType, EVT::SimpleValueType,
unsigned FastISel::FastEmit_f(MVT, MVT,
ISD::NodeType, ConstantFP * /*FPImm*/) {
return 0;
}
unsigned FastISel::FastEmit_ri(EVT::SimpleValueType, EVT::SimpleValueType,
unsigned FastISel::FastEmit_ri(MVT, MVT,
ISD::NodeType, unsigned /*Op0*/,
uint64_t /*Imm*/) {
return 0;
}
unsigned FastISel::FastEmit_rf(EVT::SimpleValueType, EVT::SimpleValueType,
unsigned FastISel::FastEmit_rf(MVT, MVT,
ISD::NodeType, unsigned /*Op0*/,
ConstantFP * /*FPImm*/) {
return 0;
}
unsigned FastISel::FastEmit_rri(EVT::SimpleValueType, EVT::SimpleValueType,
unsigned FastISel::FastEmit_rri(MVT, MVT,
ISD::NodeType,
unsigned /*Op0*/, unsigned /*Op1*/,
uint64_t /*Imm*/) {
@ -808,9 +807,9 @@ unsigned FastISel::FastEmit_rri(EVT::SimpleValueType, EVT::SimpleValueType,
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(EVT::SimpleValueType VT, ISD::NodeType Opcode,
unsigned FastISel::FastEmit_ri_(MVT VT, ISD::NodeType Opcode,
unsigned Op0, uint64_t Imm,
EVT::SimpleValueType ImmType) {
MVT ImmType) {
// First check if immediate type is legal. If not, we can't use the ri form.
unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Imm);
if (ResultReg != 0)
@ -825,9 +824,9 @@ unsigned FastISel::FastEmit_ri_(EVT::SimpleValueType VT, ISD::NodeType Opcode,
/// to emit an instruction with a floating-point immediate operand using
/// FastEmit_rf. If that fails, it materializes the immediate into a register
/// and tries FastEmit_rr instead.
unsigned FastISel::FastEmit_rf_(EVT::SimpleValueType VT, ISD::NodeType Opcode,
unsigned FastISel::FastEmit_rf_(MVT VT, ISD::NodeType Opcode,
unsigned Op0, ConstantFP *FPImm,
EVT::SimpleValueType ImmType) {
MVT ImmType) {
// First check if immediate type is legal. If not, we can't use the rf form.
unsigned ResultReg = FastEmit_rf(VT, VT, Opcode, Op0, FPImm);
if (ResultReg != 0)
@ -988,7 +987,7 @@ unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
return ResultReg;
}
unsigned FastISel::FastEmitInst_extractsubreg(EVT::SimpleValueType RetVT,
unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
unsigned Op0, uint32_t Idx) {
const TargetRegisterClass* RC = MRI.getRegClass(Op0);
@ -1009,6 +1008,6 @@ unsigned FastISel::FastEmitInst_extractsubreg(EVT::SimpleValueType RetVT,
/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(EVT::SimpleValueType VT, unsigned Op) {
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op) {
return FastEmit_ri(VT, VT, ISD::AND, Op, 1);
}
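
All of the stubs above return 0, which callers read as "the target did not handle this". A condensed sketch of the call-site side, taken from the SelectCast hunk above: the hooks now take plain MVTs, so EVTs computed from IR types are narrowed with getSimpleVT() before the call.

unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                Opcode, InputReg);
if (ResultReg == 0)
  // 0 means the target hook declined; give up on the fast path and let
  // SelectionDAG handle the instruction instead.
  return false;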

View File

@ -213,7 +213,7 @@ SelectionDAGLegalize::SelectionDAGLegalize(SelectionDAG &dag,
CodeGenOpt::Level ol)
: TLI(dag.getTargetLoweringInfo()), DAG(dag), OptLevel(ol),
ValueTypeActions(TLI.getValueTypeActions()) {
assert(EVT::LAST_VALUETYPE <= EVT::MAX_ALLOWED_VALUETYPE &&
assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
"Too many value types for ValueTypeActions to hold!");
}
@ -254,19 +254,19 @@ static SDNode *FindCallEndFromCallStart(SDNode *Node) {
// The chain is usually at the end.
SDValue TheChain(Node, Node->getNumValues()-1);
if (TheChain.getValueType() != EVT::Other) {
if (TheChain.getValueType() != MVT::Other) {
// Sometimes it's at the beginning.
TheChain = SDValue(Node, 0);
if (TheChain.getValueType() != EVT::Other) {
if (TheChain.getValueType() != MVT::Other) {
// Otherwise, hunt for it.
for (unsigned i = 1, e = Node->getNumValues(); i != e; ++i)
if (Node->getValueType(i) == EVT::Other) {
if (Node->getValueType(i) == MVT::Other) {
TheChain = SDValue(Node, i);
break;
}
// Otherwise, we walked into a node without a chain.
if (TheChain.getValueType() != EVT::Other)
if (TheChain.getValueType() != MVT::Other)
return 0;
}
}
@ -290,7 +290,7 @@ static SDNode *FindCallStartFromCallEnd(SDNode *Node) {
assert(Node && "Didn't find callseq_start for a call??");
if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
assert(Node->getOperand(0).getValueType() == EVT::Other &&
assert(Node->getOperand(0).getValueType() == MVT::Other &&
"Node doesn't have a token chain argument!");
return FindCallStartFromCallEnd(Node->getOperand(0).getNode());
}
@ -347,15 +347,15 @@ static SDValue ExpandConstantFP(ConstantFPSDNode *CFP, bool UseCP,
EVT VT = CFP->getValueType(0);
ConstantFP *LLVMC = const_cast<ConstantFP*>(CFP->getConstantFPValue());
if (!UseCP) {
assert((VT == EVT::f64 || VT == EVT::f32) && "Invalid type expansion");
assert((VT == MVT::f64 || VT == MVT::f32) && "Invalid type expansion");
return DAG.getConstant(LLVMC->getValueAPF().bitcastToAPInt(),
(VT == EVT::f64) ? EVT::i64 : EVT::i32);
(VT == MVT::f64) ? MVT::i64 : MVT::i32);
}
EVT OrigVT = VT;
EVT SVT = VT;
while (SVT != EVT::f32) {
SVT = (EVT::SimpleValueType)(SVT.getSimpleVT() - 1);
while (SVT != MVT::f32) {
SVT = (MVT::SimpleValueType)(SVT.getSimpleVT().SimpleTy - 1);
if (CFP->isValueValidForType(SVT, CFP->getValueAPF()) &&
// Only do this if the target has a native EXTLOAD instruction from
// smaller type.
@ -450,7 +450,7 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
MemVT, ST->isVolatile(),
MinAlign(ST->getAlignment(), Offset)));
// The order of the stores doesn't matter - say it with a TokenFactor.
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, &Stores[0],
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
Stores.size());
}
}
@ -459,7 +459,7 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
"Unaligned store of unknown type.");
// Get the half-size VT
EVT NewStoredVT =
(EVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT() - 1);
(MVT::SimpleValueType)(ST->getMemoryVT().getSimpleVT().SimpleTy - 1);
int NumBits = NewStoredVT.getSizeInBits();
int IncrementSize = NumBits / 8;
@ -480,7 +480,7 @@ SDValue ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
ST->getSrcValue(), SVOffset + IncrementSize,
NewStoredVT, ST->isVolatile(), Alignment);
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Store1, Store2);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2);
}
/// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
@ -552,7 +552,7 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
NULL, 0, MemVT));
// The order of the stores doesn't matter - say it with a TokenFactor.
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, &Stores[0],
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &Stores[0],
Stores.size());
// Finally, perform the original load only redirected to the stack slot.
@ -607,7 +607,7 @@ SDValue ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount);
Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo);
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo.getValue(1),
SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
SDValue Ops[] = { Result, TF };
@ -702,27 +702,27 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
bool isVolatile = ST->isVolatile();
DebugLoc dl = ST->getDebugLoc();
if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(ST->getValue())) {
if (CFP->getValueType(0) == EVT::f32 &&
getTypeAction(EVT::i32) == Legal) {
if (CFP->getValueType(0) == MVT::f32 &&
getTypeAction(MVT::i32) == Legal) {
Tmp3 = DAG.getConstant(CFP->getValueAPF().
bitcastToAPInt().zextOrTrunc(32),
EVT::i32);
MVT::i32);
return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
SVOffset, isVolatile, Alignment);
} else if (CFP->getValueType(0) == EVT::f64) {
} else if (CFP->getValueType(0) == MVT::f64) {
// If this target supports 64-bit registers, do a single 64-bit store.
if (getTypeAction(EVT::i64) == Legal) {
if (getTypeAction(MVT::i64) == Legal) {
Tmp3 = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
zextOrTrunc(64), EVT::i64);
zextOrTrunc(64), MVT::i64);
return DAG.getStore(Tmp1, dl, Tmp3, Tmp2, ST->getSrcValue(),
SVOffset, isVolatile, Alignment);
} else if (getTypeAction(EVT::i32) == Legal && !ST->isVolatile()) {
} else if (getTypeAction(MVT::i32) == Legal && !ST->isVolatile()) {
// Otherwise, if the target supports 32-bit registers, use 2 32-bit
// stores. If the target supports neither 32- nor 64-bits, this
// xform is certainly not worth it.
const APInt &IntVal =CFP->getValueAPF().bitcastToAPInt();
SDValue Lo = DAG.getConstant(APInt(IntVal).trunc(32), EVT::i32);
SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), EVT::i32);
SDValue Lo = DAG.getConstant(APInt(IntVal).trunc(32), MVT::i32);
SDValue Hi = DAG.getConstant(IntVal.lshr(32).trunc(32), MVT::i32);
if (TLI.isBigEndian()) std::swap(Lo, Hi);
Lo = DAG.getStore(Tmp1, dl, Lo, Tmp2, ST->getSrcValue(),
@ -732,7 +732,7 @@ SDValue SelectionDAGLegalize::OptimizeFloatStore(StoreSDNode* ST) {
Hi = DAG.getStore(Tmp1, dl, Hi, Tmp2, ST->getSrcValue(), SVOffset+4,
isVolatile, MinAlign(Alignment, 4U));
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo, Hi);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
}
}
@ -777,7 +777,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::INTRINSIC_VOID:
case ISD::VAARG:
case ISD::STACKSAVE:
Action = TLI.getOperationAction(Node->getOpcode(), EVT::Other);
Action = TLI.getOperationAction(Node->getOpcode(), MVT::Other);
break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
@ -882,7 +882,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
case ISD::BR_CC:
case ISD::BRCOND:
// Branches tweak the chain to include LastCALLSEQ_END
Ops[0] = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Ops[0],
Ops[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ops[0],
LastCALLSEQ_END);
Ops[0] = LegalizeOp(Ops[0]);
LastCALLSEQ_END = DAG.getEntryNode();
@ -979,7 +979,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Merge in the last call, to ensure that this call starts after the last
// call ended.
if (LastCALLSEQ_END.getOpcode() != ISD::EntryToken) {
Tmp1 = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Tmp1 = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Tmp1, LastCALLSEQ_END);
Tmp1 = LegalizeOp(Tmp1);
}
@ -1026,7 +1026,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
Tmp1 = LegalizeOp(Node->getOperand(0)); // Legalize the chain.
// Do not try to legalize the target-specific arguments (#1+), except for
// an optional flag input.
if (Node->getOperand(Node->getNumOperands()-1).getValueType() != EVT::Flag){
if (Node->getOperand(Node->getNumOperands()-1).getValueType() != MVT::Flag){
if (Tmp1 != Node->getOperand(0)) {
SmallVector<SDValue, 8> Ops(Node->op_begin(), Node->op_end());
Ops[0] = Tmp1;
@ -1122,8 +1122,8 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// tells the optimizers that those bits are undefined. It would be
// nice to have an effective generic way of getting these benefits...
// Until such a way is found, don't insist on promoting i1 here.
(SrcVT != EVT::i1 ||
TLI.getLoadExtAction(ExtType, EVT::i1) == TargetLowering::Promote)) {
(SrcVT != MVT::i1 ||
TLI.getLoadExtAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
// Promote to a byte-sized load if not loading an integral number of
// bytes. For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
unsigned NewWidth = SrcVT.getStoreSizeInBits();
@ -1189,7 +1189,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo.getValue(1),
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// Move the top bits to the right place.
@ -1218,7 +1218,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo.getValue(1),
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// Move the top bits to the right place.
@ -1267,7 +1267,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
break;
case TargetLowering::Expand:
// f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
if (SrcVT == EVT::f32 && Node->getValueType(0) == EVT::f64) {
if (SrcVT == MVT::f32 && Node->getValueType(0) == MVT::f64) {
SDValue Load = DAG.getLoad(SrcVT, dl, Tmp1, Tmp2, LD->getSrcValue(),
LD->getSrcValueOffset(),
LD->isVolatile(), LD->getAlignment());
@ -1416,7 +1416,7 @@ SDValue SelectionDAGLegalize::LegalizeOp(SDValue Op) {
}
// The order of the stores doesn't matter.
Result = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo, Hi);
Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
} else {
if (Tmp1 != ST->getChain() || Tmp3 != ST->getValue() ||
Tmp2 != ST->getBasePtr())
@ -1524,7 +1524,7 @@ SDValue SelectionDAGLegalize::ExpandVectorBuildThroughStack(SDNode* Node) {
SDValue StoreChain;
if (!Stores.empty()) // Not all undef elements?
StoreChain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&Stores[0], Stores.size());
else
StoreChain = DAG.getEntryNode();
@ -1537,28 +1537,28 @@ SDValue SelectionDAGLegalize::ExpandFCOPYSIGN(SDNode* Node) {
DebugLoc dl = Node->getDebugLoc();
SDValue Tmp1 = Node->getOperand(0);
SDValue Tmp2 = Node->getOperand(1);
assert((Tmp2.getValueType() == EVT::f32 ||
Tmp2.getValueType() == EVT::f64) &&
assert((Tmp2.getValueType() == MVT::f32 ||
Tmp2.getValueType() == MVT::f64) &&
"Ugly special-cased code!");
// Get the sign bit of the RHS.
SDValue SignBit;
EVT IVT = Tmp2.getValueType() == EVT::f64 ? EVT::i64 : EVT::i32;
EVT IVT = Tmp2.getValueType() == MVT::f64 ? MVT::i64 : MVT::i32;
if (isTypeLegal(IVT)) {
SignBit = DAG.getNode(ISD::BIT_CONVERT, dl, IVT, Tmp2);
} else {
assert(isTypeLegal(TLI.getPointerTy()) &&
(TLI.getPointerTy() == EVT::i32 ||
TLI.getPointerTy() == EVT::i64) &&
(TLI.getPointerTy() == MVT::i32 ||
TLI.getPointerTy() == MVT::i64) &&
"Legal type for load?!");
SDValue StackPtr = DAG.CreateStackTemporary(Tmp2.getValueType());
SDValue StorePtr = StackPtr, LoadPtr = StackPtr;
SDValue Ch =
DAG.getStore(DAG.getEntryNode(), dl, Tmp2, StorePtr, NULL, 0);
if (Tmp2.getValueType() == EVT::f64 && TLI.isLittleEndian())
if (Tmp2.getValueType() == MVT::f64 && TLI.isLittleEndian())
LoadPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(),
LoadPtr, DAG.getIntPtrConstant(4));
SignBit = DAG.getExtLoad(ISD::SEXTLOAD, dl, TLI.getPointerTy(),
Ch, LoadPtr, NULL, 0, EVT::i32);
Ch, LoadPtr, NULL, 0, MVT::i32);
}
SignBit =
DAG.getSetCC(dl, TLI.getSetCCResultType(SignBit.getValueType()),
@ -1577,8 +1577,8 @@ SDValue SelectionDAGLegalize::ExpandDBG_STOPPOINT(SDNode* Node) {
DebugLoc dl = Node->getDebugLoc();
DwarfWriter *DW = DAG.getDwarfWriter();
bool useDEBUG_LOC = TLI.isOperationLegalOrCustom(ISD::DEBUG_LOC,
EVT::Other);
bool useLABEL = TLI.isOperationLegalOrCustom(ISD::DBG_LABEL, EVT::Other);
MVT::Other);
bool useLABEL = TLI.isOperationLegalOrCustom(ISD::DBG_LABEL, MVT::Other);
const DbgStopPointSDNode *DSP = cast<DbgStopPointSDNode>(Node);
GlobalVariable *CU_GV = cast<GlobalVariable>(DSP->getCompileUnit());
@ -1592,9 +1592,9 @@ SDValue SelectionDAGLegalize::ExpandDBG_STOPPOINT(SDNode* Node) {
// A bit self-referential to have DebugLoc on Debug_Loc nodes, but it
// won't hurt anything.
if (useDEBUG_LOC) {
return DAG.getNode(ISD::DEBUG_LOC, dl, EVT::Other, Node->getOperand(0),
DAG.getConstant(Line, EVT::i32),
DAG.getConstant(Col, EVT::i32),
return DAG.getNode(ISD::DEBUG_LOC, dl, MVT::Other, Node->getOperand(0),
DAG.getConstant(Line, MVT::i32),
DAG.getConstant(Col, MVT::i32),
DAG.getSrcValue(CU.getGV()));
} else {
unsigned ID = DW->RecordSourceLine(Line, Col, CU);
@ -1886,12 +1886,12 @@ SDValue SelectionDAGLegalize::ExpandFPLibCall(SDNode* Node,
RTLIB::Libcall Call_F80,
RTLIB::Libcall Call_PPCF128) {
RTLIB::Libcall LC;
switch (Node->getValueType(0).getSimpleVT()) {
switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unexpected request for libcall!");
case EVT::f32: LC = Call_F32; break;
case EVT::f64: LC = Call_F64; break;
case EVT::f80: LC = Call_F80; break;
case EVT::ppcf128: LC = Call_PPCF128; break;
case MVT::f32: LC = Call_F32; break;
case MVT::f64: LC = Call_F64; break;
case MVT::f80: LC = Call_F80; break;
case MVT::ppcf128: LC = Call_PPCF128; break;
}
return ExpandLibCall(LC, Node, false);
}
@ -1902,12 +1902,12 @@ SDValue SelectionDAGLegalize::ExpandIntLibCall(SDNode* Node, bool isSigned,
RTLIB::Libcall Call_I64,
RTLIB::Libcall Call_I128) {
RTLIB::Libcall LC;
switch (Node->getValueType(0).getSimpleVT()) {
switch (Node->getValueType(0).getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unexpected request for libcall!");
case EVT::i16: LC = Call_I16; break;
case EVT::i32: LC = Call_I32; break;
case EVT::i64: LC = Call_I64; break;
case EVT::i128: LC = Call_I128; break;
case MVT::i16: LC = Call_I16; break;
case MVT::i32: LC = Call_I32; break;
case MVT::i64: LC = Call_I64; break;
case MVT::i128: LC = Call_I128; break;
}
return ExpandLibCall(LC, Node, isSigned);
}
@ -1920,11 +1920,11 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Op0,
EVT DestVT,
DebugLoc dl) {
if (Op0.getValueType() == EVT::i32) {
if (Op0.getValueType() == MVT::i32) {
// simple 32-bit [signed|unsigned] integer to float/double expansion
// Get the stack frame index of an 8-byte buffer.
SDValue StackSlot = DAG.CreateStackTemporary(EVT::f64);
SDValue StackSlot = DAG.CreateStackTemporary(MVT::f64);
// word offset constant for Hi/Lo address computation
SDValue WordOff = DAG.getConstant(sizeof(int), TLI.getPointerTy());
@ -1939,8 +1939,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Op0Mapped;
if (isSigned) {
// constant used to invert sign bit (signed to unsigned mapping)
SDValue SignBit = DAG.getConstant(0x80000000u, EVT::i32);
Op0Mapped = DAG.getNode(ISD::XOR, dl, EVT::i32, Op0, SignBit);
SDValue SignBit = DAG.getConstant(0x80000000u, MVT::i32);
Op0Mapped = DAG.getNode(ISD::XOR, dl, MVT::i32, Op0, SignBit);
} else {
Op0Mapped = Op0;
}
@ -1948,28 +1948,28 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl,
Op0Mapped, Lo, NULL, 0);
// initial hi portion of constructed double
SDValue InitialHi = DAG.getConstant(0x43300000u, EVT::i32);
SDValue InitialHi = DAG.getConstant(0x43300000u, MVT::i32);
// store the hi of the constructed double - biased exponent
SDValue Store2=DAG.getStore(Store1, dl, InitialHi, Hi, NULL, 0);
// load the constructed double
SDValue Load = DAG.getLoad(EVT::f64, dl, Store2, StackSlot, NULL, 0);
SDValue Load = DAG.getLoad(MVT::f64, dl, Store2, StackSlot, NULL, 0);
// FP constant to bias correct the final result
SDValue Bias = DAG.getConstantFP(isSigned ?
BitsToDouble(0x4330000080000000ULL) :
BitsToDouble(0x4330000000000000ULL),
EVT::f64);
MVT::f64);
// subtract the bias
SDValue Sub = DAG.getNode(ISD::FSUB, dl, EVT::f64, Load, Bias);
SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Load, Bias);
// final result
SDValue Result;
// handle final rounding
if (DestVT == EVT::f64) {
if (DestVT == MVT::f64) {
// do nothing
Result = Sub;
} else if (DestVT.bitsLT(EVT::f64)) {
} else if (DestVT.bitsLT(MVT::f64)) {
Result = DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
DAG.getIntPtrConstant(0));
} else if (DestVT.bitsGT(EVT::f64)) {
} else if (DestVT.bitsGT(MVT::f64)) {
Result = DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
}
return Result;
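
A note on the magic numbers in the expansion above, since they are easy to misread: 0x4330000000000000 is the f64 encoding of 2^52 (exponent field 0x433 = 1075, bias 1023, zero mantissa). Writing a 32-bit value x into the low word of that double yields exactly 2^52 + x, because x fits entirely within the 52-bit mantissa, so subtracting the bias 2^52 recovers x converted to floating point. In the signed case the input is first XORed with 0x80000000, which maps x to the unsigned value x + 2^31, and the matching bias 0x4330000080000000 = 2^52 + 2^31 undoes that shift in the same subtraction.
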
@ -1988,12 +1988,12 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
// as a negative number. To counteract this, the dynamic code adds an
// offset depending on the data type.
uint64_t FF;
switch (Op0.getValueType().getSimpleVT()) {
switch (Op0.getValueType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unsupported integer type!");
case EVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
case EVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
case EVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
case EVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float)
case MVT::i8 : FF = 0x43800000ULL; break; // 2^8 (as a float)
case MVT::i16: FF = 0x47800000ULL; break; // 2^16 (as a float)
case MVT::i32: FF = 0x4F800000ULL; break; // 2^32 (as a float)
case MVT::i64: FF = 0x5F800000ULL; break; // 2^64 (as a float)
}
if (TLI.isLittleEndian()) FF <<= 32;
Constant *FudgeFactor = ConstantInt::get(Type::Int64Ty, FF);
@ -2003,8 +2003,8 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
CPIdx = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), CPIdx, CstOffset);
Alignment = std::min(Alignment, 4u);
SDValue FudgeInReg;
if (DestVT == EVT::f32)
FudgeInReg = DAG.getLoad(EVT::f32, dl, DAG.getEntryNode(), CPIdx,
if (DestVT == MVT::f32)
FudgeInReg = DAG.getLoad(MVT::f32, dl, DAG.getEntryNode(), CPIdx,
PseudoSourceValue::getConstantPool(), 0,
false, Alignment);
else {
@ -2012,7 +2012,7 @@ SDValue SelectionDAGLegalize::ExpandLegalINT_TO_FP(bool isSigned,
LegalizeOp(DAG.getExtLoad(ISD::EXTLOAD, dl, DestVT,
DAG.getEntryNode(), CPIdx,
PseudoSourceValue::getConstantPool(), 0,
EVT::f32, false, Alignment));
MVT::f32, false, Alignment));
}
return DAG.getNode(ISD::FADD, dl, DestVT, Tmp1, FudgeInReg);
@ -2034,7 +2034,7 @@ SDValue SelectionDAGLegalize::PromoteLegalINT_TO_FP(SDValue LegalOp,
// Scan for the appropriate larger type to use.
while (1) {
NewInTy = (EVT::SimpleValueType)(NewInTy.getSimpleVT()+1);
NewInTy = (MVT::SimpleValueType)(NewInTy.getSimpleVT().SimpleTy+1);
assert(NewInTy.isInteger() && "Ran out of possibilities!");
// If the target supports SINT_TO_FP of this type, use it.
@ -2076,7 +2076,7 @@ SDValue SelectionDAGLegalize::PromoteLegalFP_TO_INT(SDValue LegalOp,
// Scan for the appropriate larger type to use.
while (1) {
NewOutTy = (EVT::SimpleValueType)(NewOutTy.getSimpleVT()+1);
NewOutTy = (MVT::SimpleValueType)(NewOutTy.getSimpleVT().SimpleTy+1);
assert(NewOutTy.isInteger() && "Ran out of possibilities!");
if (TLI.isOperationLegalOrCustom(ISD::FP_TO_SINT, NewOutTy)) {
@ -2107,13 +2107,13 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
EVT VT = Op.getValueType();
EVT SHVT = TLI.getShiftAmountTy();
SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5, Tmp6, Tmp7, Tmp8;
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unhandled Expand type in BSWAP!");
case EVT::i16:
case MVT::i16:
Tmp2 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
Tmp1 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
return DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
case EVT::i32:
case MVT::i32:
Tmp4 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
Tmp3 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(8, SHVT));
Tmp2 = DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(8, SHVT));
@ -2123,7 +2123,7 @@ SDValue SelectionDAGLegalize::ExpandBSWAP(SDValue Op, DebugLoc dl) {
Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp3);
Tmp2 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp1);
return DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp2);
case EVT::i64:
case MVT::i64:
Tmp8 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(56, SHVT));
Tmp7 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(40, SHVT));
Tmp6 = DAG.getNode(ISD::SHL, dl, VT, Op, DAG.getConstant(24, SHVT));
@ -2834,7 +2834,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr,
TLI.getPICJumpTableRelocBase(Table, DAG));
}
Tmp1 = DAG.getNode(ISD::BRIND, dl, EVT::Other, LD.getValue(1), Addr);
Tmp1 = DAG.getNode(ISD::BRIND, dl, MVT::Other, LD.getValue(1), Addr);
Results.push_back(Tmp1);
break;
}
@ -2844,12 +2844,12 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
Tmp1 = Node->getOperand(0);
Tmp2 = Node->getOperand(1);
if (Tmp2.getOpcode() == ISD::SETCC) {
Tmp1 = DAG.getNode(ISD::BR_CC, dl, EVT::Other,
Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other,
Tmp1, Tmp2.getOperand(2),
Tmp2.getOperand(0), Tmp2.getOperand(1),
Node->getOperand(2));
} else {
Tmp1 = DAG.getNode(ISD::BR_CC, dl, EVT::Other, Tmp1,
Tmp1 = DAG.getNode(ISD::BR_CC, dl, MVT::Other, Tmp1,
DAG.getCondCode(ISD::SETNE), Tmp2,
DAG.getConstant(0, Tmp2.getValueType()),
Node->getOperand(2));


@ -31,10 +31,10 @@ static RTLIB::Libcall GetFPLibCall(EVT VT,
RTLIB::Libcall Call_F80,
RTLIB::Libcall Call_PPCF128) {
return
VT == EVT::f32 ? Call_F32 :
VT == EVT::f64 ? Call_F64 :
VT == EVT::f80 ? Call_F80 :
VT == EVT::ppcf128 ? Call_PPCF128 :
VT == MVT::f32 ? Call_F32 :
VT == MVT::f64 ? Call_F64 :
VT == MVT::f80 ? Call_F80 :
VT == MVT::ppcf128 ? Call_PPCF128 :
RTLIB::UNKNOWN_LIBCALL;
}
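
GetFPLibCall is how the SoftenFloatRes_* routines pick a runtime routine for the (now MVT-named) float type. A sketch of a typical call, modeled on the FADD softening path; that caller is not part of the hunks shown here, and the RTLIB::ADD_* names are quoted from memory of the tree rather than from this diff:

RTLIB::Libcall LC = GetFPLibCall(N->getValueType(0),
                                 RTLIB::ADD_F32, RTLIB::ADD_F64,
                                 RTLIB::ADD_F80, RTLIB::ADD_PPCF128);
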
@ -353,7 +353,7 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_FPOW(SDNode *N) {
}
SDValue DAGTypeLegalizer::SoftenFloatRes_FPOWI(SDNode *N) {
assert(N->getOperand(1).getValueType() == EVT::i32 &&
assert(N->getOperand(1).getValueType() == MVT::i32 &&
"Unsupported power type!");
EVT NVT = TLI.getTypeToTransformTo(N->getValueType(0));
SDValue Ops[2] = { GetSoftenedFloat(N->getOperand(0)), N->getOperand(1) };
@ -510,9 +510,9 @@ SDValue DAGTypeLegalizer::SoftenFloatRes_XINT_TO_FP(SDNode *N) {
// a larger type, eg: i8 -> fp. Even if it is legal, no libcall may exactly
// match. Look for an appropriate libcall.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
for (unsigned t = EVT::FIRST_INTEGER_VALUETYPE;
t <= EVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL; ++t) {
NVT = (EVT::SimpleValueType)t;
for (unsigned t = MVT::FIRST_INTEGER_VALUETYPE;
t <= MVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL; ++t) {
NVT = (MVT::SimpleValueType)t;
// The source needs to be big enough to hold the operand.
if (NVT.bitsGE(SVT))
LC = Signed ? RTLIB::getSINTTOFP(NVT, RVT):RTLIB::getUINTTOFP (NVT, RVT);
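
Because the hunk above interleaves old and new lines, the final form of the search loop is easier to read pulled together: simple types are still enumerated as raw integers, and the integer is cast back through MVT::SimpleValueType (which builds an EVT) on each iteration. SVT, RVT and Signed are as in the surrounding function:

EVT NVT;
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
for (unsigned t = MVT::FIRST_INTEGER_VALUETYPE;
     t <= MVT::LAST_INTEGER_VALUETYPE && LC == RTLIB::UNKNOWN_LIBCALL; ++t) {
  NVT = (MVT::SimpleValueType)t;       // rebuild a type from the raw enum value
  if (NVT.bitsGE(SVT))                 // first integer type big enough for the source
    LC = Signed ? RTLIB::getSINTTOFP(NVT, RVT) : RTLIB::getUINTTOFP(NVT, RVT);
}
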
@ -576,68 +576,68 @@ void DAGTypeLegalizer::SoftenSetCCOperands(SDValue &NewLHS, SDValue &NewRHS,
SDValue RHSInt = GetSoftenedFloat(NewRHS);
EVT VT = NewLHS.getValueType();
assert((VT == EVT::f32 || VT == EVT::f64) && "Unsupported setcc type!");
assert((VT == MVT::f32 || VT == MVT::f64) && "Unsupported setcc type!");
// Expand into one or more soft-fp libcall(s).
RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
switch (CCCode) {
case ISD::SETEQ:
case ISD::SETOEQ:
LC1 = (VT == EVT::f32) ? RTLIB::OEQ_F32 : RTLIB::OEQ_F64;
LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 : RTLIB::OEQ_F64;
break;
case ISD::SETNE:
case ISD::SETUNE:
LC1 = (VT == EVT::f32) ? RTLIB::UNE_F32 : RTLIB::UNE_F64;
LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 : RTLIB::UNE_F64;
break;
case ISD::SETGE:
case ISD::SETOGE:
LC1 = (VT == EVT::f32) ? RTLIB::OGE_F32 : RTLIB::OGE_F64;
LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 : RTLIB::OGE_F64;
break;
case ISD::SETLT:
case ISD::SETOLT:
LC1 = (VT == EVT::f32) ? RTLIB::OLT_F32 : RTLIB::OLT_F64;
LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 : RTLIB::OLT_F64;
break;
case ISD::SETLE:
case ISD::SETOLE:
LC1 = (VT == EVT::f32) ? RTLIB::OLE_F32 : RTLIB::OLE_F64;
LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 : RTLIB::OLE_F64;
break;
case ISD::SETGT:
case ISD::SETOGT:
LC1 = (VT == EVT::f32) ? RTLIB::OGT_F32 : RTLIB::OGT_F64;
LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 : RTLIB::OGT_F64;
break;
case ISD::SETUO:
LC1 = (VT == EVT::f32) ? RTLIB::UO_F32 : RTLIB::UO_F64;
LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 : RTLIB::UO_F64;
break;
case ISD::SETO:
LC1 = (VT == EVT::f32) ? RTLIB::O_F32 : RTLIB::O_F64;
LC1 = (VT == MVT::f32) ? RTLIB::O_F32 : RTLIB::O_F64;
break;
default:
LC1 = (VT == EVT::f32) ? RTLIB::UO_F32 : RTLIB::UO_F64;
LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 : RTLIB::UO_F64;
switch (CCCode) {
case ISD::SETONE:
// SETONE = SETOLT | SETOGT
LC1 = (VT == EVT::f32) ? RTLIB::OLT_F32 : RTLIB::OLT_F64;
LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 : RTLIB::OLT_F64;
// Fallthrough
case ISD::SETUGT:
LC2 = (VT == EVT::f32) ? RTLIB::OGT_F32 : RTLIB::OGT_F64;
LC2 = (VT == MVT::f32) ? RTLIB::OGT_F32 : RTLIB::OGT_F64;
break;
case ISD::SETUGE:
LC2 = (VT == EVT::f32) ? RTLIB::OGE_F32 : RTLIB::OGE_F64;
LC2 = (VT == MVT::f32) ? RTLIB::OGE_F32 : RTLIB::OGE_F64;
break;
case ISD::SETULT:
LC2 = (VT == EVT::f32) ? RTLIB::OLT_F32 : RTLIB::OLT_F64;
LC2 = (VT == MVT::f32) ? RTLIB::OLT_F32 : RTLIB::OLT_F64;
break;
case ISD::SETULE:
LC2 = (VT == EVT::f32) ? RTLIB::OLE_F32 : RTLIB::OLE_F64;
LC2 = (VT == MVT::f32) ? RTLIB::OLE_F32 : RTLIB::OLE_F64;
break;
case ISD::SETUEQ:
LC2 = (VT == EVT::f32) ? RTLIB::OEQ_F32 : RTLIB::OEQ_F64;
LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 : RTLIB::OEQ_F64;
break;
default: assert(false && "Do not know how to soften this setcc!");
}
}
EVT RetVT = EVT::i32; // FIXME: is this the correct return type?
EVT RetVT = MVT::i32; // FIXME: is this the correct return type?
SDValue Ops[2] = { LHSInt, RHSInt };
NewLHS = MakeLibCall(LC1, RetVT, Ops, 2, false/*sign irrelevant*/, dl);
NewRHS = DAG.getConstant(0, RetVT);
@ -841,7 +841,7 @@ void DAGTypeLegalizer::ExpandFloatRes_ConstantFP(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::ExpandFloatRes_FABS(SDNode *N, SDValue &Lo,
SDValue &Hi) {
assert(N->getValueType(0) == EVT::ppcf128 &&
assert(N->getValueType(0) == MVT::ppcf128 &&
"Logic only correct for ppcf128!");
DebugLoc dl = N->getDebugLoc();
SDValue Tmp;
@ -1088,7 +1088,7 @@ void DAGTypeLegalizer::ExpandFloatRes_LOAD(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
SDValue &Hi) {
assert(N->getValueType(0) == EVT::ppcf128 && "Unsupported XINT_TO_FP!");
assert(N->getValueType(0) == MVT::ppcf128 && "Unsupported XINT_TO_FP!");
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(VT);
SDValue Src = N->getOperand(0);
@ -1099,20 +1099,20 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
// First do an SINT_TO_FP, whether the original was signed or unsigned.
// When promoting partial word types to i32 we must honor the signedness,
// though.
if (SrcVT.bitsLE(EVT::i32)) {
if (SrcVT.bitsLE(MVT::i32)) {
// The integer can be represented exactly in an f64.
Src = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, dl,
EVT::i32, Src);
MVT::i32, Src);
Lo = DAG.getConstantFP(APFloat(APInt(NVT.getSizeInBits(), 0)), NVT);
Hi = DAG.getNode(ISD::SINT_TO_FP, dl, NVT, Src);
} else {
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (SrcVT.bitsLE(EVT::i64)) {
if (SrcVT.bitsLE(MVT::i64)) {
Src = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, dl,
EVT::i64, Src);
MVT::i64, Src);
LC = RTLIB::SINTTOFP_I64_PPCF128;
} else if (SrcVT.bitsLE(EVT::i128)) {
Src = DAG.getNode(ISD::SIGN_EXTEND, dl, EVT::i128, Src);
} else if (SrcVT.bitsLE(MVT::i128)) {
Src = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i128, Src);
LC = RTLIB::SINTTOFP_I128_PPCF128;
}
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported XINT_TO_FP!");
@ -1134,23 +1134,23 @@ void DAGTypeLegalizer::ExpandFloatRes_XINT_TO_FP(SDNode *N, SDValue &Lo,
static const uint64_t TwoE128[] = { 0x47f0000000000000LL, 0 };
const uint64_t *Parts = 0;
switch (SrcVT.getSimpleVT()) {
switch (SrcVT.getSimpleVT().SimpleTy) {
default:
assert(false && "Unsupported UINT_TO_FP!");
case EVT::i32:
case MVT::i32:
Parts = TwoE32;
break;
case EVT::i64:
case MVT::i64:
Parts = TwoE64;
break;
case EVT::i128:
case MVT::i128:
Parts = TwoE128;
break;
}
Lo = DAG.getNode(ISD::FADD, dl, VT, Hi,
DAG.getConstantFP(APFloat(APInt(128, 2, Parts)),
EVT::ppcf128));
MVT::ppcf128));
Lo = DAG.getNode(ISD::SELECT_CC, dl, VT, Src, DAG.getConstant(0, SrcVT),
Lo, Hi, DAG.getCondCode(ISD::SETLT));
GetPairElements(Lo, Lo, Hi);
@ -1223,7 +1223,7 @@ void DAGTypeLegalizer::FloatExpandSetCCOperands(SDValue &NewLHS,
GetExpandedFloat(NewRHS, RHSLo, RHSHi);
EVT VT = NewLHS.getValueType();
assert(VT == EVT::ppcf128 && "Unsupported setcc type!");
assert(VT == MVT::ppcf128 && "Unsupported setcc type!");
// FIXME: This generated code sucks. We want to generate
// FCMPU crN, hi1, hi2
@ -1264,7 +1264,7 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_BR_CC(SDNode *N) {
}
SDValue DAGTypeLegalizer::ExpandFloatOp_FP_ROUND(SDNode *N) {
assert(N->getOperand(0).getValueType() == EVT::ppcf128 &&
assert(N->getOperand(0).getValueType() == MVT::ppcf128 &&
"Logic only correct for ppcf128!");
SDValue Lo, Hi;
GetExpandedFloat(N->getOperand(0), Lo, Hi);
@ -1279,14 +1279,14 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_SINT(SDNode *N) {
// Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
// PPC (the libcall is not available). FIXME: Do this in a less hacky way.
if (RVT == EVT::i32) {
assert(N->getOperand(0).getValueType() == EVT::ppcf128 &&
if (RVT == MVT::i32) {
assert(N->getOperand(0).getValueType() == MVT::ppcf128 &&
"Logic only correct for ppcf128!");
SDValue Res = DAG.getNode(ISD::FP_ROUND_INREG, dl, EVT::ppcf128,
N->getOperand(0), DAG.getValueType(EVT::f64));
Res = DAG.getNode(ISD::FP_ROUND, dl, EVT::f64, Res,
SDValue Res = DAG.getNode(ISD::FP_ROUND_INREG, dl, MVT::ppcf128,
N->getOperand(0), DAG.getValueType(MVT::f64));
Res = DAG.getNode(ISD::FP_ROUND, dl, MVT::f64, Res,
DAG.getIntPtrConstant(1));
return DAG.getNode(ISD::FP_TO_SINT, dl, EVT::i32, Res);
return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
}
RTLIB::Libcall LC = RTLIB::getFPTOSINT(N->getOperand(0).getValueType(), RVT);
@ -1300,24 +1300,24 @@ SDValue DAGTypeLegalizer::ExpandFloatOp_FP_TO_UINT(SDNode *N) {
// Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
// PPC (the libcall is not available). FIXME: Do this in a less hacky way.
if (RVT == EVT::i32) {
assert(N->getOperand(0).getValueType() == EVT::ppcf128 &&
if (RVT == MVT::i32) {
assert(N->getOperand(0).getValueType() == MVT::ppcf128 &&
"Logic only correct for ppcf128!");
const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
APFloat APF = APFloat(APInt(128, 2, TwoE31));
SDValue Tmp = DAG.getConstantFP(APF, EVT::ppcf128);
SDValue Tmp = DAG.getConstantFP(APF, MVT::ppcf128);
// X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
// FIXME: generated code sucks.
return DAG.getNode(ISD::SELECT_CC, dl, EVT::i32, N->getOperand(0), Tmp,
DAG.getNode(ISD::ADD, dl, EVT::i32,
DAG.getNode(ISD::FP_TO_SINT, dl, EVT::i32,
return DAG.getNode(ISD::SELECT_CC, dl, MVT::i32, N->getOperand(0), Tmp,
DAG.getNode(ISD::ADD, dl, MVT::i32,
DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
DAG.getNode(ISD::FSUB, dl,
EVT::ppcf128,
MVT::ppcf128,
N->getOperand(0),
Tmp)),
DAG.getConstant(0x80000000, EVT::i32)),
DAG.getConstant(0x80000000, MVT::i32)),
DAG.getNode(ISD::FP_TO_SINT, dl,
EVT::i32, N->getOperand(0)),
MVT::i32, N->getOperand(0)),
DAG.getCondCode(ISD::SETGE));
}
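The SELECT_CC expansion above reads more easily as scalar code. A minimal C++ sketch of the same computation, illustrative only and not part of this commit (the function name is invented):

#include <cstdint>

// X >= 2^31 ? (int)(X - 2^31) + 0x80000000 : (int)X, exactly as in the DAG above.
uint32_t FPToUInt32ViaSigned(double X) {
  const double TwoE31 = 2147483648.0;  // 2^31
  if (X >= TwoE31)
    return static_cast<uint32_t>(static_cast<int32_t>(X - TwoE31)) + 0x80000000u;
  return static_cast<uint32_t>(static_cast<int32_t>(X));
}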

View File

@ -718,7 +718,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BRCOND(SDNode *N, unsigned OpNo) {
assert(OpNo == 1 && "only know how to promote condition");
// Promote all the way up to the canonical SetCC type.
EVT SVT = TLI.getSetCCResultType(EVT::Other);
EVT SVT = TLI.getSetCCResultType(MVT::Other);
SDValue Cond = PromoteTargetBoolean(N->getOperand(1), SVT);
// The chain (Op#0) and basic block destination (Op#2) are always legal types.
@ -802,7 +802,7 @@ SDValue DAGTypeLegalizer::PromoteIntOp_MEMBARRIER(SDNode *N) {
NewOps[0] = N->getOperand(0);
for (unsigned i = 1; i < array_lengthof(NewOps); ++i) {
SDValue Flag = GetPromotedInteger(N->getOperand(i));
NewOps[i] = DAG.getZeroExtendInReg(Flag, dl, EVT::i1);
NewOps[i] = DAG.getZeroExtendInReg(Flag, dl, MVT::i1);
}
return DAG.UpdateNodeOperands(SDValue (N, 0), NewOps,
array_lengthof(NewOps));
@ -1009,7 +1009,7 @@ void DAGTypeLegalizer::ExpandShiftByConstant(SDNode *N, unsigned Amt,
TLI.isOperationLegalOrCustom(ISD::ADDC,
TLI.getTypeToExpandTo(NVT))) {
// Emit this X << 1 as X+X.
SDVTList VTList = DAG.getVTList(NVT, EVT::Flag);
SDVTList VTList = DAG.getVTList(NVT, MVT::Flag);
SDValue LoOps[2] = { InL, InL };
Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps, 2);
SDValue HiOps[3] = { InH, InH, Lo.getValue(1) };
@ -1237,7 +1237,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
// Do not generate ADDC/ADDE or SUBC/SUBE if the target does not support
// them. TODO: Teach operation legalization how to expand unsupported
// ADDC/ADDE/SUBC/SUBE. The problem is that these operations generate
// a carry of type EVT::Flag, but there doesn't seem to be any way to
// a carry of type MVT::Flag, but there doesn't seem to be any way to
// generate a value of this type in the expanded code sequence.
bool hasCarry =
TLI.isOperationLegalOrCustom(N->getOpcode() == ISD::ADD ?
@ -1245,7 +1245,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
TLI.getTypeToExpandTo(NVT));
if (hasCarry) {
SDVTList VTList = DAG.getVTList(NVT, EVT::Flag);
SDVTList VTList = DAG.getVTList(NVT, MVT::Flag);
if (N->getOpcode() == ISD::ADD) {
Lo = DAG.getNode(ISD::ADDC, dl, VTList, LoOps, 2);
HiOps[2] = Lo.getValue(1);
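For readers unfamiliar with the ADDC/ADDE pair used in this hunk: it is the DAG form of an ordinary carry-propagating double-width add. A C++ sketch of the same computation, illustrative only and not part of this commit:

#include <cstdint>

// A 128-bit add built from 64-bit halves: the low add produces a carry (ADDC),
// the high add consumes it (ADDE).
void Add128(uint64_t AL, uint64_t AH, uint64_t BL, uint64_t BH,
            uint64_t &RL, uint64_t &RH) {
  RL = AL + BL;
  uint64_t Carry = RL < AL;  // carry out of the low half
  RH = AH + BH + Carry;
}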
@ -1290,7 +1290,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBC(SDNode *N,
DebugLoc dl = N->getDebugLoc();
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
SDVTList VTList = DAG.getVTList(LHSL.getValueType(), EVT::Flag);
SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
SDValue LoOps[2] = { LHSL, RHSL };
SDValue HiOps[3] = { LHSH, RHSH };
@ -1316,7 +1316,7 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUBE(SDNode *N,
DebugLoc dl = N->getDebugLoc();
GetExpandedInteger(N->getOperand(0), LHSL, LHSH);
GetExpandedInteger(N->getOperand(1), RHSL, RHSH);
SDVTList VTList = DAG.getVTList(LHSL.getValueType(), EVT::Flag);
SDVTList VTList = DAG.getVTList(LHSL.getValueType(), MVT::Flag);
SDValue LoOps[3] = { LHSL, RHSL, N->getOperand(2) };
SDValue HiOps[3] = { LHSH, RHSH };
@ -1539,7 +1539,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo.getValue(1),
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
} else {
// Big-endian - high bits are at low addresses. Favor aligned loads at
@ -1565,7 +1565,7 @@ void DAGTypeLegalizer::ExpandIntRes_LOAD(LoadSDNode *N,
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo.getValue(1),
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
if (ExcessBits < NVT.getSizeInBits()) {
@ -1673,13 +1673,13 @@ void DAGTypeLegalizer::ExpandIntRes_MUL(SDNode *N,
// If nothing else, we can make a libcall.
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == EVT::i16)
if (VT == MVT::i16)
LC = RTLIB::MUL_I16;
else if (VT == EVT::i32)
else if (VT == MVT::i32)
LC = RTLIB::MUL_I32;
else if (VT == EVT::i64)
else if (VT == MVT::i64)
LC = RTLIB::MUL_I64;
else if (VT == EVT::i128)
else if (VT == MVT::i128)
LC = RTLIB::MUL_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported MUL!");
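The if/else chains in these ExpandIntRes_* routines all follow the same shape: choose a runtime-library routine purely from the integer bit width. A compact sketch of that dispatch (the enum and helper below are made up for illustration; the names mirror the RTLIB values above):

enum Libcall { MUL_I16, MUL_I32, MUL_I64, MUL_I128, UNKNOWN_LIBCALL };

Libcall getMulLibcall(unsigned BitWidth) {
  switch (BitWidth) {
  case 16:  return MUL_I16;
  case 32:  return MUL_I32;
  case 64:  return MUL_I64;
  case 128: return MUL_I128;
  default:  return UNKNOWN_LIBCALL;
  }
}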
@ -1693,13 +1693,13 @@ void DAGTypeLegalizer::ExpandIntRes_SDIV(SDNode *N,
DebugLoc dl = N->getDebugLoc();
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == EVT::i16)
if (VT == MVT::i16)
LC = RTLIB::SDIV_I16;
else if (VT == EVT::i32)
else if (VT == MVT::i32)
LC = RTLIB::SDIV_I32;
else if (VT == EVT::i64)
else if (VT == MVT::i64)
LC = RTLIB::SDIV_I64;
else if (VT == EVT::i128)
else if (VT == MVT::i128)
LC = RTLIB::SDIV_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
@ -1755,34 +1755,34 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
bool isSigned;
if (N->getOpcode() == ISD::SHL) {
isSigned = false; /*sign irrelevant*/
if (VT == EVT::i16)
if (VT == MVT::i16)
LC = RTLIB::SHL_I16;
else if (VT == EVT::i32)
else if (VT == MVT::i32)
LC = RTLIB::SHL_I32;
else if (VT == EVT::i64)
else if (VT == MVT::i64)
LC = RTLIB::SHL_I64;
else if (VT == EVT::i128)
else if (VT == MVT::i128)
LC = RTLIB::SHL_I128;
} else if (N->getOpcode() == ISD::SRL) {
isSigned = false;
if (VT == EVT::i16)
if (VT == MVT::i16)
LC = RTLIB::SRL_I16;
else if (VT == EVT::i32)
else if (VT == MVT::i32)
LC = RTLIB::SRL_I32;
else if (VT == EVT::i64)
else if (VT == MVT::i64)
LC = RTLIB::SRL_I64;
else if (VT == EVT::i128)
else if (VT == MVT::i128)
LC = RTLIB::SRL_I128;
} else {
assert(N->getOpcode() == ISD::SRA && "Unknown shift!");
isSigned = true;
if (VT == EVT::i16)
if (VT == MVT::i16)
LC = RTLIB::SRA_I16;
else if (VT == EVT::i32)
else if (VT == MVT::i32)
LC = RTLIB::SRA_I32;
else if (VT == EVT::i64)
else if (VT == MVT::i64)
LC = RTLIB::SRA_I64;
else if (VT == EVT::i128)
else if (VT == MVT::i128)
LC = RTLIB::SRA_I128;
}
@ -1857,13 +1857,13 @@ void DAGTypeLegalizer::ExpandIntRes_SREM(SDNode *N,
DebugLoc dl = N->getDebugLoc();
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == EVT::i16)
if (VT == MVT::i16)
LC = RTLIB::SREM_I16;
else if (VT == EVT::i32)
else if (VT == MVT::i32)
LC = RTLIB::SREM_I32;
else if (VT == EVT::i64)
else if (VT == MVT::i64)
LC = RTLIB::SREM_I64;
else if (VT == EVT::i128)
else if (VT == MVT::i128)
LC = RTLIB::SREM_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
@ -1888,13 +1888,13 @@ void DAGTypeLegalizer::ExpandIntRes_UDIV(SDNode *N,
DebugLoc dl = N->getDebugLoc();
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == EVT::i16)
if (VT == MVT::i16)
LC = RTLIB::UDIV_I16;
else if (VT == EVT::i32)
else if (VT == MVT::i32)
LC = RTLIB::UDIV_I32;
else if (VT == EVT::i64)
else if (VT == MVT::i64)
LC = RTLIB::UDIV_I64;
else if (VT == EVT::i128)
else if (VT == MVT::i128)
LC = RTLIB::UDIV_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported UDIV!");
@ -1908,13 +1908,13 @@ void DAGTypeLegalizer::ExpandIntRes_UREM(SDNode *N,
DebugLoc dl = N->getDebugLoc();
RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
if (VT == EVT::i16)
if (VT == MVT::i16)
LC = RTLIB::UREM_I16;
else if (VT == EVT::i32)
else if (VT == MVT::i32)
LC = RTLIB::UREM_I32;
else if (VT == EVT::i64)
else if (VT == MVT::i64)
LC = RTLIB::UREM_I64;
else if (VT == EVT::i128)
else if (VT == MVT::i128)
LC = RTLIB::UREM_I128;
assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported UREM!");
@ -2222,7 +2222,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
Hi = DAG.getTruncStore(Ch, dl, Hi, Ptr, N->getSrcValue(),
SVOffset+IncrementSize, NEVT,
isVolatile, MinAlign(Alignment, IncrementSize));
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo, Hi);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
} else {
// Big-endian - high bits are at low addresses. Favor aligned stores at
// the cost of some bit-fiddling.
@ -2257,7 +2257,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_STORE(StoreSDNode *N, unsigned OpNo) {
SVOffset+IncrementSize,
EVT::getIntegerVT(ExcessBits),
isVolatile, MinAlign(Alignment, IncrementSize));
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo, Hi);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
}
@ -2288,11 +2288,11 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
const uint64_t F32TwoE128 = 0x7F800000ULL;
APInt FF(32, 0);
if (SrcVT == EVT::i32)
if (SrcVT == MVT::i32)
FF = APInt(32, F32TwoE32);
else if (SrcVT == EVT::i64)
else if (SrcVT == MVT::i64)
FF = APInt(32, F32TwoE64);
else if (SrcVT == EVT::i128)
else if (SrcVT == MVT::i128)
FF = APInt(32, F32TwoE128);
else
assert(false && "Unsupported UINT_TO_FP!");
@ -2323,7 +2323,7 @@ SDValue DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP(SDNode *N) {
// Load the value out, extending it from f32 to the destination float type.
// FIXME: Avoid the extend by constructing the right constant pool?
SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, DstVT, DAG.getEntryNode(),
FudgePtr, NULL, 0, EVT::f32,
FudgePtr, NULL, 0, MVT::f32,
false, Alignment);
return DAG.getNode(ISD::FADD, dl, DstVT, SignedConv, Fudge);
}
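The "fudge factor" loaded above amounts to adding either 0 or 2^N to a signed conversion, selected by the sign bit of the source. A scalar C++ sketch of the trick, illustrative only and not part of this commit:

#include <cstdint>

double UIntToFPWithFudge(uint64_t X) {
  static const float Fudge[2] = { 0.0f, 18446744073709551616.0f };  // { 0, 2^64 }
  double Signed = static_cast<double>(static_cast<int64_t>(X));     // signed convert
  return Signed + Fudge[X >> 63];                                   // index by the sign bit
}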

View File

@ -159,7 +159,7 @@ public:
explicit DAGTypeLegalizer(SelectionDAG &dag)
: TLI(dag.getTargetLoweringInfo()), DAG(dag),
ValueTypeActions(TLI.getValueTypeActions()) {
assert(EVT::LAST_VALUETYPE <= EVT::MAX_ALLOWED_VALUETYPE &&
assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
"Too many value types for ValueTypeActions to hold!");
}

View File

@ -220,7 +220,7 @@ void DAGTypeLegalizer::ExpandRes_NormalLoad(SDNode *N, SDValue &Lo,
// Build a factor node to remember that this load is independent of the
// other one.
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo.getValue(1),
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// Handle endianness of the load.
@ -402,7 +402,7 @@ SDValue DAGTypeLegalizer::ExpandOp_NormalStore(SDNode *N, unsigned OpNo) {
SVOffset + IncrementSize,
isVolatile, MinAlign(Alignment, IncrementSize));
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo, Hi);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}

View File

@ -306,7 +306,7 @@ SDValue VectorLegalizer::UnrollVectorOp(SDValue Op) {
Operands[j] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
OperandEltVT,
Operand,
DAG.getConstant(i, EVT::i32));
DAG.getConstant(i, MVT::i32));
} else {
// A scalar operand; just use it as is.
Operands[j] = Operand;

View File

@ -213,7 +213,7 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_SETCC(SDNode *N) {
DebugLoc DL = N->getDebugLoc();
// Turn it into a scalar SETCC.
return DAG.getNode(ISD::SETCC, DL, EVT::i1, LHS, RHS, N->getOperand(2));
return DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, N->getOperand(2));
}
SDValue DAGTypeLegalizer::ScalarizeVecRes_UNDEF(SDNode *N) {
@ -247,16 +247,16 @@ SDValue DAGTypeLegalizer::ScalarizeVecRes_VSETCC(SDNode *N) {
if (TLI.getBooleanContents() !=
TargetLowering::ZeroOrNegativeOneBooleanContent)
Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, SVT, Res,
DAG.getValueType(EVT::i1));
DAG.getValueType(MVT::i1));
// Truncate to the final type.
return DAG.getNode(ISD::TRUNCATE, DL, NVT, Res);
}
// The SETCC result type is smaller than the vector element type.
// If the SetCC result is not sign-extended, chop it down to EVT::i1.
// If the SetCC result is not sign-extended, chop it down to MVT::i1.
if (TLI.getBooleanContents() !=
TargetLowering::ZeroOrNegativeOneBooleanContent)
Res = DAG.getNode(ISD::TRUNCATE, DL, EVT::i1, Res);
Res = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Res);
// Sign extend to the final type.
return DAG.getNode(ISD::SIGN_EXTEND, DL, NVT, Res);
}
@ -725,7 +725,7 @@ void DAGTypeLegalizer::SplitVecRes_LOAD(LoadSDNode *LD, SDValue &Lo,
// Build a factor node to remember that this load is independent of the
// other one.
Ch = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo.getValue(1),
Ch = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1),
Hi.getValue(1));
// Legalized the chain result - switch anything that used the old chain to
@ -1097,7 +1097,7 @@ SDValue DAGTypeLegalizer::SplitVecOp_STORE(StoreSDNode *N, unsigned OpNo) {
Hi = DAG.getStore(Ch, dl, Hi, Ptr, N->getSrcValue(), SVOffset+IncrementSize,
isVol, MinAlign(Alignment, IncrementSize));
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Lo, Hi);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo, Hi);
}
@ -1660,7 +1660,7 @@ SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
if (LdChain.size() == 1)
NewChain = LdChain[0];
else
NewChain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, &LdChain[0],
NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &LdChain[0],
LdChain.size());
// Modified the chain - switch anything that used the old chain to use
@ -1941,7 +1941,7 @@ SDValue DAGTypeLegalizer::WidenVecOp_STORE(SDNode *N) {
return StChain[0];
else
return DAG.getNode(ISD::TokenFactor, dl,
EVT::Other,&StChain[0],StChain.size());
MVT::Other,&StChain[0],StChain.size());
}
//===----------------------------------------------------------------------===//

View File

@ -216,15 +216,15 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT == EVT::Flag)
if (VT == MVT::Flag)
return NULL;
else if (VT == EVT::Other)
else if (VT == MVT::Other)
TryUnfold = true;
}
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
const SDValue &Op = N->getOperand(i);
EVT VT = Op.getNode()->getValueType(Op.getResNo());
if (VT == EVT::Flag)
if (VT == MVT::Flag)
return NULL;
}

View File

@ -353,15 +353,15 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
bool TryUnfold = false;
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT == EVT::Flag)
if (VT == MVT::Flag)
return NULL;
else if (VT == EVT::Other)
else if (VT == MVT::Other)
TryUnfold = true;
}
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
const SDValue &Op = N->getOperand(i);
EVT VT = Op.getNode()->getValueType(Op.getResNo());
if (VT == EVT::Flag)
if (VT == MVT::Flag)
return NULL;
}
@ -630,7 +630,7 @@ bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
if (Node->getOpcode() == ISD::INLINEASM) {
// Inline asm can clobber physical defs.
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == EVT::Flag)
if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
--NumOps; // Ignore the flag operand.
for (unsigned i = 2; i != NumOps;) {
@ -1217,7 +1217,7 @@ static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
return false;
for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT == EVT::Flag || VT == EVT::Other)
if (VT == MVT::Flag || VT == MVT::Other)
continue;
if (!N->hasAnyUseOfValue(i))
continue;

View File

@ -111,7 +111,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
// Scan up to find flagged preds.
SDNode *N = NI;
while (N->getNumOperands() &&
N->getOperand(N->getNumOperands()-1).getValueType() == EVT::Flag) {
N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
N = N->getOperand(N->getNumOperands()-1).getNode();
assert(N->getNodeId() == -1 && "Node already inserted!");
N->setNodeId(NodeSUnit->NodeNum);
@ -119,7 +119,7 @@ void ScheduleDAGSDNodes::BuildSchedUnits() {
// Scan down to find any flagged succs.
N = NI;
while (N->getValueType(N->getNumValues()-1) == EVT::Flag) {
while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
SDValue FlagVal(N, N->getNumValues()-1);
// There are either zero or one users of the Flag result.
@ -190,8 +190,8 @@ void ScheduleDAGSDNodes::AddSchedEdges() {
if (OpSU == SU) continue; // In the same group.
EVT OpVT = N->getOperand(i).getValueType();
assert(OpVT != EVT::Flag && "Flagged nodes should be in same sunit!");
bool isChain = OpVT == EVT::Other;
assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
bool isChain = OpVT == MVT::Other;
unsigned PhysReg = 0;
int Cost = 1;
@ -244,9 +244,9 @@ void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
/// not go into the resulting MachineInstr).
unsigned ScheduleDAGSDNodes::CountResults(SDNode *Node) {
unsigned N = Node->getNumValues();
while (N && Node->getValueType(N - 1) == EVT::Flag)
while (N && Node->getValueType(N - 1) == MVT::Flag)
--N;
if (N && Node->getValueType(N - 1) == EVT::Other)
if (N && Node->getValueType(N - 1) == MVT::Other)
--N; // Skip over chain result.
return N;
}
@ -266,9 +266,9 @@ unsigned ScheduleDAGSDNodes::CountOperands(SDNode *Node) {
/// operand
unsigned ScheduleDAGSDNodes::ComputeMemOperandsEnd(SDNode *Node) {
unsigned N = Node->getNumOperands();
while (N && Node->getOperand(N - 1).getValueType() == EVT::Flag)
while (N && Node->getOperand(N - 1).getValueType() == MVT::Flag)
--N;
if (N && Node->getOperand(N - 1).getValueType() == EVT::Other)
if (N && Node->getOperand(N - 1).getValueType() == MVT::Other)
--N; // Ignore chain if it exists.
return N;
}
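CountResults and ComputeMemOperandsEnd both use the same counting convention: drop trailing Flag values, then drop a single trailing chain (Other) value, and whatever remains are the "real" results or operands. A stand-alone sketch of that convention (the enum below is a stand-in for the real MVT values, not part of this commit):

#include <vector>

enum SimpleVT { Other, Flag, i32 };

unsigned CountRealResults(const std::vector<SimpleVT> &Results) {
  unsigned N = static_cast<unsigned>(Results.size());
  while (N && Results[N - 1] == Flag)
    --N;                             // skip trailing flag results
  if (N && Results[N - 1] == Other)
    --N;                             // skip the chain result, if any
  return N;
}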

View File

@ -70,7 +70,7 @@ EmitCopyFromReg(SDNode *Node, unsigned ResNo, bool IsClone, bool IsCloned,
if (Op.getNode() != Node || Op.getResNo() != ResNo)
continue;
EVT VT = Node->getValueType(Op.getResNo());
if (VT == EVT::Other || VT == EVT::Flag)
if (VT == MVT::Other || VT == MVT::Flag)
continue;
Match = false;
if (User->isMachineOpcode()) {
@ -238,8 +238,8 @@ ScheduleDAGSDNodes::AddRegisterOperand(MachineInstr *MI, SDValue Op,
unsigned IIOpNum,
const TargetInstrDesc *II,
DenseMap<SDValue, unsigned> &VRBaseMap) {
assert(Op.getValueType() != EVT::Other &&
Op.getValueType() != EVT::Flag &&
assert(Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Flag &&
"Chain and flag operands should occur at end of operand list!");
// Get/emit the operand.
unsigned VReg = getVR(Op, VRBaseMap);
@ -322,8 +322,8 @@ void ScheduleDAGSDNodes::AddOperand(MachineInstr *MI, SDValue Op,
MI->addOperand(MachineOperand::CreateES(ES->getSymbol(), 0,
ES->getTargetFlags()));
} else {
assert(Op.getValueType() != EVT::Other &&
Op.getValueType() != EVT::Flag &&
assert(Op.getValueType() != MVT::Other &&
Op.getValueType() != MVT::Flag &&
"Chain and flag operands should occur at end of operand list!");
AddRegisterOperand(MI, Op, IIOpNum, II, VRBaseMap);
}
@ -599,7 +599,7 @@ void ScheduleDAGSDNodes::EmitNode(SDNode *Node, bool IsClone, bool IsCloned,
}
case ISD::INLINEASM: {
unsigned NumOps = Node->getNumOperands();
if (Node->getOperand(NumOps-1).getValueType() == EVT::Flag)
if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
--NumOps; // Ignore the flag operand.
// Create the inline asm machine instruction.

View File

@ -54,13 +54,13 @@ static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
}
static const fltSemantics *EVTToAPFloatSemantics(EVT VT) {
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unknown FP format");
case EVT::f32: return &APFloat::IEEEsingle;
case EVT::f64: return &APFloat::IEEEdouble;
case EVT::f80: return &APFloat::x87DoubleExtended;
case EVT::f128: return &APFloat::IEEEquad;
case EVT::ppcf128: return &APFloat::PPCDoubleDouble;
case MVT::f32: return &APFloat::IEEEsingle;
case MVT::f64: return &APFloat::IEEEdouble;
case MVT::f80: return &APFloat::x87DoubleExtended;
case MVT::f128: return &APFloat::IEEEquad;
case MVT::ppcf128: return &APFloat::PPCDoubleDouble;
}
}
@ -83,7 +83,7 @@ bool ConstantFPSDNode::isValueValidForType(EVT VT,
assert(VT.isFloatingPoint() && "Can only convert between FP types");
// PPC long double cannot be converted to any other type.
if (VT == EVT::ppcf128 ||
if (VT == MVT::ppcf128 ||
&Val.getSemantics() == &APFloat::PPCDoubleDouble)
return false;
@ -503,7 +503,7 @@ encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM,
/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
if (N->getValueType(0) == EVT::Flag)
if (N->getValueType(0) == MVT::Flag)
return true; // Never CSE anything that produces a flag.
switch (N->getOpcode()) {
@ -518,7 +518,7 @@ static bool doNotCSE(SDNode *N) {
// Check that remaining values produced are not flags.
for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
if (N->getValueType(i) == EVT::Flag)
if (N->getValueType(i) == MVT::Flag)
return true; // Never CSE anything that produces a flag.
return false;
@ -643,8 +643,8 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
if (VT.isExtended()) {
Erased = ExtendedValueTypeNodes.erase(VT);
} else {
Erased = ValueTypeNodes[VT.getSimpleVT()] != 0;
ValueTypeNodes[VT.getSimpleVT()] = 0;
Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
}
break;
}
@ -657,7 +657,7 @@ bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
// Verify that the node was actually in one of the CSE maps, unless it has a
// flag result (which cannot be CSE'd) or is one of the special cases that are
// not subject to CSE.
if (!Erased && N->getValueType(N->getNumValues()-1) != EVT::Flag &&
if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Flag &&
!N->isMachineOpcode() && !doNotCSE(N)) {
N->dump(this);
cerr << "\n";
@ -788,7 +788,7 @@ void SelectionDAG::VerifyNode(SDNode *N) {
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
const Type *Ty = VT == EVT::iPTR ?
const Type *Ty = VT == MVT::iPTR ?
PointerType::get(Type::Int8Ty, 0) :
VT.getTypeForEVT();
@ -799,7 +799,7 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
SelectionDAG::SelectionDAG(TargetLowering &tli, FunctionLoweringInfo &fli)
: TLI(tli), FLI(fli), DW(0),
EntryNode(ISD::EntryToken, DebugLoc::getUnknownLoc(),
getVTList(EVT::Other)), Root(getEntryNode()) {
getVTList(MVT::Other)), Root(getEntryNode()) {
AllNodes.push_back(&EntryNode);
}
@ -950,7 +950,7 @@ SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
EVT EltVT =
VT.isVector() ? VT.getVectorElementType() : VT;
if (EltVT==EVT::f32)
if (EltVT==MVT::f32)
return getConstantFP(APFloat((float)Val), VT, isTarget);
else
return getConstantFP(APFloat(Val), VT, isTarget);
@ -1084,7 +1084,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::BasicBlock, getVTList(EVT::Other), 0, 0);
AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
ID.AddPointer(MBB);
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
@ -1097,11 +1097,12 @@ SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
}
SDValue SelectionDAG::getValueType(EVT VT) {
if (VT.isSimple() && (unsigned)VT.getSimpleVT() >= ValueTypeNodes.size())
ValueTypeNodes.resize(VT.getSimpleVT()+1);
if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
ValueTypeNodes.size())
ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);
SDNode *&N = VT.isExtended() ?
ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT()];
ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];
if (N) return SDValue(N, 0);
N = NodeAllocator.Allocate<VTSDNode>();
@ -1299,7 +1300,7 @@ SDValue SelectionDAG::getLabel(unsigned Opcode, DebugLoc dl,
unsigned LabelID) {
FoldingSetNodeID ID;
SDValue Ops[] = { Root };
AddNodeIDNode(ID, Opcode, getVTList(EVT::Other), &Ops[0], 1);
AddNodeIDNode(ID, Opcode, getVTList(MVT::Other), &Ops[0], 1);
ID.AddInteger(LabelID);
void *IP = 0;
if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
@ -1316,7 +1317,7 @@ SDValue SelectionDAG::getSrcValue(const Value *V) {
"SrcValue is not a pointer?");
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(EVT::Other), 0, 0);
AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
ID.AddPointer(V);
void *IP = 0;
@ -1338,7 +1339,7 @@ SDValue SelectionDAG::getMemOperand(const MachineMemOperand &MO) {
#endif
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::MEMOPERAND, getVTList(EVT::Other), 0, 0);
AddNodeIDNode(ID, ISD::MEMOPERAND, getVTList(MVT::Other), 0, 0);
MO.Profile(ID);
void *IP = 0;
@ -1439,7 +1440,7 @@ SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
// No compile time operations on this type yet.
if (N1C->getValueType(0) == EVT::ppcf128)
if (N1C->getValueType(0) == MVT::ppcf128)
return SDValue();
APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
@ -2277,7 +2278,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
case ISD::SINT_TO_FP: {
const uint64_t zero[] = {0, 0};
// No compile time operations on this type.
if (VT==EVT::ppcf128)
if (VT==MVT::ppcf128)
break;
APFloat apf = APFloat(APInt(BitWidth, 2, zero));
(void)apf.convertFromAPInt(Val,
@ -2286,9 +2287,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
return getConstantFP(apf, VT);
}
case ISD::BIT_CONVERT:
if (VT == EVT::f32 && C->getValueType(0) == EVT::i32)
if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
return getConstantFP(Val.bitsToFloat(), VT);
else if (VT == EVT::f64 && C->getValueType(0) == EVT::i64)
else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
return getConstantFP(Val.bitsToDouble(), VT);
break;
case ISD::BSWAP:
@ -2305,7 +2306,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
// Constant fold unary operations with a floating point constant operand.
if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
APFloat V = C->getValueAPF(); // make copy
if (VT != EVT::ppcf128 && Operand.getValueType() != EVT::ppcf128) {
if (VT != MVT::ppcf128 && Operand.getValueType() != MVT::ppcf128) {
switch (Opcode) {
case ISD::FNEG:
V.changeSign();
@ -2337,9 +2338,9 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
return getConstant(api, VT);
}
case ISD::BIT_CONVERT:
if (VT == EVT::i32 && C->getValueType(0) == EVT::f32)
if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
else if (VT == EVT::i64 && C->getValueType(0) == EVT::f64)
else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
break;
}
@ -2450,7 +2451,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
SDNode *N;
SDVTList VTs = getVTList(VT);
if (VT != EVT::Flag) { // Don't CSE flag producing nodes
if (VT != MVT::Flag) { // Don't CSE flag producing nodes
FoldingSetNodeID ID;
SDValue Ops[1] = { Operand };
AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
@ -2515,8 +2516,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
switch (Opcode) {
default: break;
case ISD::TokenFactor:
assert(VT == EVT::Other && N1.getValueType() == EVT::Other &&
N2.getValueType() == EVT::Other && "Invalid token factor!");
assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
N2.getValueType() == MVT::Other && "Invalid token factor!");
// Fold trivial token factors.
if (N1.getOpcode() == ISD::EntryToken) return N2;
if (N2.getOpcode() == ISD::EntryToken) return N1;
@ -2606,7 +2607,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// Always fold shifts of i1 values so the code generator doesn't need to
// handle them. Since we know the size of the shift has to be less than the
// size of the value, the shift/rotate count is guaranteed to be zero.
if (VT == EVT::i1)
if (VT == MVT::i1)
return N1;
break;
case ISD::FP_ROUND_INREG: {
@ -2747,7 +2748,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// Canonicalize constant to RHS if commutative
std::swap(N1CFP, N2CFP);
std::swap(N1, N2);
} else if (N2CFP && VT != EVT::ppcf128) {
} else if (N2CFP && VT != MVT::ppcf128) {
APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
APFloat::opStatus s;
switch (Opcode) {
@ -2862,7 +2863,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// Memoize this node if possible.
SDNode *N;
SDVTList VTs = getVTList(VT);
if (VT != EVT::Flag) {
if (VT != MVT::Flag) {
SDValue Ops[] = { N1, N2 };
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
@ -2921,7 +2922,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
case ISD::BRCOND:
if (N2C) {
if (N2C->getZExtValue()) // Unconditional branch
return getNode(ISD::BR, DL, EVT::Other, N1, N3);
return getNode(ISD::BR, DL, MVT::Other, N1, N3);
else
return N1; // Never-taken branch
}
@ -2939,7 +2940,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
// Memoize node if it doesn't produce a flag.
SDNode *N;
SDVTList VTs = getVTList(VT);
if (VT != EVT::Flag) {
if (VT != MVT::Flag) {
SDValue Ops[] = { N1, N2, N3 };
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
@ -2993,7 +2994,7 @@ SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
ArgChains.push_back(SDValue(L, 1));
// Build a tokenfactor for all the chains.
return getNode(ISD::TokenFactor, Chain.getDebugLoc(), EVT::Other,
return getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
&ArgChains[0], ArgChains.size());
}
@ -3041,7 +3042,7 @@ static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
if (VT.isInteger())
return DAG.getConstant(0, VT);
unsigned NumElts = VT.getVectorNumElements();
EVT EltVT = (VT.getVectorElementType() == EVT::f32) ? EVT::i32 : EVT::i64;
EVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
return DAG.getNode(ISD::BIT_CONVERT, dl, VT,
DAG.getConstant(0, EVT::getVectorVT(EltVT, NumElts)));
}
@ -3105,7 +3106,7 @@ bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
bool isSrcConst = isa<ConstantSDNode>(Src);
bool AllowUnalign = TLI.allowsUnalignedMemoryAccesses();
EVT VT = TLI.getOptimalMemOpType(Size, Align, isSrcConst, isSrcStr, DAG);
if (VT != EVT::iAny) {
if (VT != MVT::iAny) {
unsigned NewAlign = (unsigned)
TLI.getTargetData()->getABITypeAlignment(VT.getTypeForEVT());
// If source is a string constant, this will require an unaligned load.
@ -3113,14 +3114,14 @@ bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
if (Dst.getOpcode() != ISD::FrameIndex) {
// Can't change destination alignment. It requires an unaligned store.
if (AllowUnalign)
VT = EVT::iAny;
VT = MVT::iAny;
} else {
int FI = cast<FrameIndexSDNode>(Dst)->getIndex();
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
if (MFI->isFixedObjectIndex(FI)) {
// Can't change destination alignment. It requires an unaligned store.
if (AllowUnalign)
VT = EVT::iAny;
VT = MVT::iAny;
} else {
// Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI) < NewAlign)
@ -3131,21 +3132,21 @@ bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
}
}
if (VT == EVT::iAny) {
if (VT == MVT::iAny) {
if (AllowUnalign) {
VT = EVT::i64;
VT = MVT::i64;
} else {
switch (Align & 7) {
case 0: VT = EVT::i64; break;
case 4: VT = EVT::i32; break;
case 2: VT = EVT::i16; break;
default: VT = EVT::i8; break;
case 0: VT = MVT::i64; break;
case 4: VT = MVT::i32; break;
case 2: VT = MVT::i16; break;
default: VT = MVT::i8; break;
}
}
EVT LVT = EVT::i64;
EVT LVT = MVT::i64;
while (!TLI.isTypeLegal(LVT))
LVT = (EVT::SimpleValueType)(LVT.getSimpleVT() - 1);
LVT = (MVT::SimpleValueType)(LVT.getSimpleVT().SimpleTy - 1);
assert(LVT.isInteger());
if (VT.bitsGT(LVT))
@ -3158,14 +3159,14 @@ bool MeetsMaxMemopRequirement(std::vector<EVT> &MemOps,
while (VTSize > Size) {
// For now, only use non-vector load / store's for the left-over pieces.
if (VT.isVector()) {
VT = EVT::i64;
VT = MVT::i64;
while (!TLI.isTypeLegal(VT))
VT = (EVT::SimpleValueType)(VT.getSimpleVT() - 1);
VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
VTSize = VT.getSizeInBits() / 8;
} else {
// This can result in a type that is not legal on the target, e.g.
// 1 or 2 bytes on PPC.
VT = (EVT::SimpleValueType)(VT.getSimpleVT() - 1);
VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
VTSize >>= 1;
}
}
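The loop above keeps shrinking the piece type until it fits in the bytes that are left. A simplified sketch of that shape (illustrative only; the helper is made up and works in bytes rather than MVTs):

#include <vector>

std::vector<unsigned> PickMemopPieceSizes(unsigned SizeInBytes) {
  std::vector<unsigned> Pieces;
  unsigned VTSize = 8;                // start at the widest piece, i64 = 8 bytes
  while (SizeInBytes > 0) {
    while (VTSize > SizeInBytes)
      VTSize >>= 1;                   // halve the piece until it fits
    Pieces.push_back(VTSize);
    SizeInBytes -= VTSize;
  }
  return Pieces;
}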
@ -3240,7 +3241,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
DstOff += VTSize;
}
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&OutChains[0], OutChains.size());
}
@ -3283,7 +3284,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
LoadChains.push_back(Value.getValue(1));
SrcOff += VTSize;
}
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&LoadChains[0], LoadChains.size());
OutChains.clear();
for (unsigned i = 0; i < NumMemOps; i++) {
@ -3298,7 +3299,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
DstOff += VTSize;
}
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&OutChains[0], OutChains.size());
}
@ -3333,7 +3334,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
DstOff += VTSize;
}
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&OutChains[0], OutChains.size());
}
@ -3478,10 +3479,10 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
Entry.Node = Dst; Entry.Ty = IntPtrTy;
Args.push_back(Entry);
// Extend or truncate the argument to be an i32 value for the call.
if (Src.getValueType().bitsGT(EVT::i32))
Src = getNode(ISD::TRUNCATE, dl, EVT::i32, Src);
if (Src.getValueType().bitsGT(MVT::i32))
Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
else
Src = getNode(ISD::ZERO_EXTEND, dl, EVT::i32, Src);
Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
Entry.Node = Src; Entry.Ty = Type::Int32Ty; Entry.isSExt = true;
Args.push_back(Entry);
Entry.Node = Size; Entry.Ty = IntPtrTy; Entry.isSExt = false;
@ -3510,7 +3511,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
SDVTList VTs = getVTList(VT, EVT::Other);
SDVTList VTs = getVTList(VT, MVT::Other);
FoldingSetNodeID ID;
ID.AddInteger(MemVT.getRawBits());
SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
@ -3549,7 +3550,7 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(MemVT);
SDVTList VTs = getVTList(VT, EVT::Other);
SDVTList VTs = getVTList(VT, MVT::Other);
FoldingSetNodeID ID;
ID.AddInteger(MemVT.getRawBits());
SDValue Ops[] = {Chain, Ptr, Val};
@ -3600,7 +3601,7 @@ SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
bool ReadMem, bool WriteMem) {
// Memoize the node unless it returns a flag.
MemIntrinsicSDNode *N;
if (VTList.VTs[VTList.NumVTs-1] != EVT::Flag) {
if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
void *IP = 0;
@ -3652,7 +3653,7 @@ SelectionDAG::getLoad(ISD::MemIndexedMode AM, DebugLoc dl,
"Unindexed load with an offset!");
SDVTList VTs = Indexed ?
getVTList(VT, Ptr.getValueType(), EVT::Other) : getVTList(VT, EVT::Other);
getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
SDValue Ops[] = { Chain, Ptr, Offset };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
@ -3708,7 +3709,7 @@ SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(VT);
SDVTList VTs = getVTList(EVT::Other);
SDVTList VTs = getVTList(MVT::Other);
SDValue Undef = getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
FoldingSetNodeID ID;
@ -3743,7 +3744,7 @@ SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
if (Alignment == 0) // Ensure that codegen never sees alignment 0
Alignment = getEVTAlignment(VT);
SDVTList VTs = getVTList(EVT::Other);
SDVTList VTs = getVTList(MVT::Other);
SDValue Undef = getUNDEF(Ptr.getValueType());
SDValue Ops[] = { Chain, Val, Ptr, Undef };
FoldingSetNodeID ID;
@ -3768,7 +3769,7 @@ SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
"Store is already a indexed store!");
SDVTList VTs = getVTList(Base.getValueType(), EVT::Other);
SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
FoldingSetNodeID ID;
AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
@ -3791,7 +3792,7 @@ SDValue SelectionDAG::getVAArg(EVT VT, DebugLoc dl,
SDValue Chain, SDValue Ptr,
SDValue SV) {
SDValue Ops[] = { Chain, Ptr, SV };
return getNode(ISD::VAARG, dl, getVTList(VT, EVT::Other), Ops, 3);
return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 3);
}
SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
@ -3844,7 +3845,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
SDNode *N;
SDVTList VTs = getVTList(VT);
if (VT != EVT::Flag) {
if (VT != MVT::Flag) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
void *IP = 0;
@ -3896,7 +3897,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
case ISD::SRL_PARTS:
case ISD::SHL_PARTS:
if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
cast<VTSDNode>(N3.getOperand(1))->getVT() != EVT::i1)
cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
else if (N3.getOpcode() == ISD::AND)
if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
@ -3912,7 +3913,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
// Memoize the node unless it returns a flag.
SDNode *N;
if (VTList.VTs[VTList.NumVTs-1] != EVT::Flag) {
if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
void *IP = 0;
@ -4407,7 +4408,7 @@ SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
unsigned NumOps) {
// If an identical node already exists, use it.
void *IP = 0;
if (VTs.VTs[VTs.NumVTs-1] != EVT::Flag) {
if (VTs.VTs[VTs.NumVTs-1] != MVT::Flag) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
@ -4581,7 +4582,7 @@ SDNode *SelectionDAG::getTargetNode(unsigned Opcode, DebugLoc dl,
/// else return NULL.
SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
const SDValue *Ops, unsigned NumOps) {
if (VTList.VTs[VTList.NumVTs-1] != EVT::Flag) {
if (VTList.VTs[VTList.NumVTs-1] != MVT::Flag) {
FoldingSetNodeID ID;
AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
void *IP = 0;
@ -4996,7 +4997,7 @@ void SDNode::Profile(FoldingSetNodeID &ID) const {
}
static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
static EVT VTs[EVT::LAST_VALUETYPE];
static EVT VTs[MVT::LAST_VALUETYPE];
static ManagedStatic<sys::SmartMutex<true> > VTMutex;
/// getValueTypeList - Return a pointer to the specified value type.
@ -5006,8 +5007,8 @@ const EVT *SDNode::getValueTypeList(EVT VT) {
if (VT.isExtended()) {
return &(*EVTs->insert(VT).first);
} else {
VTs[VT.getSimpleVT()] = VT;
return &VTs[VT.getSimpleVT()];
VTs[VT.getSimpleVT().SimpleTy] = VT;
return &VTs[VT.getSimpleVT().SimpleTy];
}
}
@ -5449,7 +5450,7 @@ void SDNode::print_types(raw_ostream &OS, const SelectionDAG *G) const {
for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
if (i) OS << ",";
if (getValueType(i) == EVT::Other)
if (getValueType(i) == MVT::Other)
OS << "ch";
else
OS << getValueType(i).getEVTString();

File diff suppressed because it is too large

View File

@ -421,7 +421,7 @@ void SelectionDAGISel::ComputeLiveOutVRegInfo() {
// Otherwise, add all chain operands to the worklist.
for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
if (N->getOperand(i).getValueType() == EVT::Other)
if (N->getOperand(i).getValueType() == MVT::Other)
Worklist.push_back(N->getOperand(i).getNode());
// If this is a CopyToReg with a vreg dest, process it.
@ -1098,7 +1098,7 @@ SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops) {
Ops.push_back(InOps[1]); // input asm string.
unsigned i = 2, e = InOps.size();
if (InOps[e-1].getValueType() == EVT::Flag)
if (InOps[e-1].getValueType() == MVT::Flag)
--e; // Don't process a flag operand if it is here.
while (i != e) {
@ -1236,7 +1236,7 @@ bool SelectionDAGISel::IsLegalAndProfitableToFold(SDNode *N, SDNode *U,
// a cycle in the scheduling graph.
EVT VT = Root->getValueType(Root->getNumValues()-1);
while (VT == EVT::Flag) {
while (VT == MVT::Flag) {
SDNode *FU = findFlagUse(Root);
if (FU == NULL)
break;

View File

@ -85,9 +85,9 @@ namespace llvm {
static std::string getEdgeAttributes(const void *Node, EdgeIter EI) {
SDValue Op = EI.getNode()->getOperand(EI.getOperand());
EVT VT = Op.getValueType();
if (VT == EVT::Flag)
if (VT == MVT::Flag)
return "color=red,style=bold";
else if (VT == EVT::Other)
else if (VT == MVT::Other)
return "color=blue,style=dashed";
return "";
}

View File

@ -250,8 +250,8 @@ static void InitLibcallNames(const char **Names) {
/// getFPEXT - Return the FPEXT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
if (OpVT == EVT::f32) {
if (RetVT == EVT::f64)
if (OpVT == MVT::f32) {
if (RetVT == MVT::f64)
return FPEXT_F32_F64;
}
return UNKNOWN_LIBCALL;
@ -260,17 +260,17 @@ RTLIB::Libcall RTLIB::getFPEXT(EVT OpVT, EVT RetVT) {
/// getFPROUND - Return the FPROUND_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
if (RetVT == EVT::f32) {
if (OpVT == EVT::f64)
if (RetVT == MVT::f32) {
if (OpVT == MVT::f64)
return FPROUND_F64_F32;
if (OpVT == EVT::f80)
if (OpVT == MVT::f80)
return FPROUND_F80_F32;
if (OpVT == EVT::ppcf128)
if (OpVT == MVT::ppcf128)
return FPROUND_PPCF128_F32;
} else if (RetVT == EVT::f64) {
if (OpVT == EVT::f80)
} else if (RetVT == MVT::f64) {
if (OpVT == MVT::f80)
return FPROUND_F80_F64;
if (OpVT == EVT::ppcf128)
if (OpVT == MVT::ppcf128)
return FPROUND_PPCF128_F64;
}
return UNKNOWN_LIBCALL;
@ -279,37 +279,37 @@ RTLIB::Libcall RTLIB::getFPROUND(EVT OpVT, EVT RetVT) {
/// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
if (OpVT == EVT::f32) {
if (RetVT == EVT::i8)
if (OpVT == MVT::f32) {
if (RetVT == MVT::i8)
return FPTOSINT_F32_I8;
if (RetVT == EVT::i16)
if (RetVT == MVT::i16)
return FPTOSINT_F32_I16;
if (RetVT == EVT::i32)
if (RetVT == MVT::i32)
return FPTOSINT_F32_I32;
if (RetVT == EVT::i64)
if (RetVT == MVT::i64)
return FPTOSINT_F32_I64;
if (RetVT == EVT::i128)
if (RetVT == MVT::i128)
return FPTOSINT_F32_I128;
} else if (OpVT == EVT::f64) {
if (RetVT == EVT::i32)
} else if (OpVT == MVT::f64) {
if (RetVT == MVT::i32)
return FPTOSINT_F64_I32;
if (RetVT == EVT::i64)
if (RetVT == MVT::i64)
return FPTOSINT_F64_I64;
if (RetVT == EVT::i128)
if (RetVT == MVT::i128)
return FPTOSINT_F64_I128;
} else if (OpVT == EVT::f80) {
if (RetVT == EVT::i32)
} else if (OpVT == MVT::f80) {
if (RetVT == MVT::i32)
return FPTOSINT_F80_I32;
if (RetVT == EVT::i64)
if (RetVT == MVT::i64)
return FPTOSINT_F80_I64;
if (RetVT == EVT::i128)
if (RetVT == MVT::i128)
return FPTOSINT_F80_I128;
} else if (OpVT == EVT::ppcf128) {
if (RetVT == EVT::i32)
} else if (OpVT == MVT::ppcf128) {
if (RetVT == MVT::i32)
return FPTOSINT_PPCF128_I32;
if (RetVT == EVT::i64)
if (RetVT == MVT::i64)
return FPTOSINT_PPCF128_I64;
if (RetVT == EVT::i128)
if (RetVT == MVT::i128)
return FPTOSINT_PPCF128_I128;
}
return UNKNOWN_LIBCALL;
@ -318,37 +318,37 @@ RTLIB::Libcall RTLIB::getFPTOSINT(EVT OpVT, EVT RetVT) {
/// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
if (OpVT == EVT::f32) {
if (RetVT == EVT::i8)
if (OpVT == MVT::f32) {
if (RetVT == MVT::i8)
return FPTOUINT_F32_I8;
if (RetVT == EVT::i16)
if (RetVT == MVT::i16)
return FPTOUINT_F32_I16;
if (RetVT == EVT::i32)
if (RetVT == MVT::i32)
return FPTOUINT_F32_I32;
if (RetVT == EVT::i64)
if (RetVT == MVT::i64)
return FPTOUINT_F32_I64;
if (RetVT == EVT::i128)
if (RetVT == MVT::i128)
return FPTOUINT_F32_I128;
} else if (OpVT == EVT::f64) {
if (RetVT == EVT::i32)
} else if (OpVT == MVT::f64) {
if (RetVT == MVT::i32)
return FPTOUINT_F64_I32;
if (RetVT == EVT::i64)
if (RetVT == MVT::i64)
return FPTOUINT_F64_I64;
if (RetVT == EVT::i128)
if (RetVT == MVT::i128)
return FPTOUINT_F64_I128;
} else if (OpVT == EVT::f80) {
if (RetVT == EVT::i32)
} else if (OpVT == MVT::f80) {
if (RetVT == MVT::i32)
return FPTOUINT_F80_I32;
if (RetVT == EVT::i64)
if (RetVT == MVT::i64)
return FPTOUINT_F80_I64;
if (RetVT == EVT::i128)
if (RetVT == MVT::i128)
return FPTOUINT_F80_I128;
} else if (OpVT == EVT::ppcf128) {
if (RetVT == EVT::i32)
} else if (OpVT == MVT::ppcf128) {
if (RetVT == MVT::i32)
return FPTOUINT_PPCF128_I32;
if (RetVT == EVT::i64)
if (RetVT == MVT::i64)
return FPTOUINT_PPCF128_I64;
if (RetVT == EVT::i128)
if (RetVT == MVT::i128)
return FPTOUINT_PPCF128_I128;
}
return UNKNOWN_LIBCALL;
@ -357,32 +357,32 @@ RTLIB::Libcall RTLIB::getFPTOUINT(EVT OpVT, EVT RetVT) {
/// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
if (OpVT == EVT::i32) {
if (RetVT == EVT::f32)
if (OpVT == MVT::i32) {
if (RetVT == MVT::f32)
return SINTTOFP_I32_F32;
else if (RetVT == EVT::f64)
else if (RetVT == MVT::f64)
return SINTTOFP_I32_F64;
else if (RetVT == EVT::f80)
else if (RetVT == MVT::f80)
return SINTTOFP_I32_F80;
else if (RetVT == EVT::ppcf128)
else if (RetVT == MVT::ppcf128)
return SINTTOFP_I32_PPCF128;
} else if (OpVT == EVT::i64) {
if (RetVT == EVT::f32)
} else if (OpVT == MVT::i64) {
if (RetVT == MVT::f32)
return SINTTOFP_I64_F32;
else if (RetVT == EVT::f64)
else if (RetVT == MVT::f64)
return SINTTOFP_I64_F64;
else if (RetVT == EVT::f80)
else if (RetVT == MVT::f80)
return SINTTOFP_I64_F80;
else if (RetVT == EVT::ppcf128)
else if (RetVT == MVT::ppcf128)
return SINTTOFP_I64_PPCF128;
} else if (OpVT == EVT::i128) {
if (RetVT == EVT::f32)
} else if (OpVT == MVT::i128) {
if (RetVT == MVT::f32)
return SINTTOFP_I128_F32;
else if (RetVT == EVT::f64)
else if (RetVT == MVT::f64)
return SINTTOFP_I128_F64;
else if (RetVT == EVT::f80)
else if (RetVT == MVT::f80)
return SINTTOFP_I128_F80;
else if (RetVT == EVT::ppcf128)
else if (RetVT == MVT::ppcf128)
return SINTTOFP_I128_PPCF128;
}
return UNKNOWN_LIBCALL;
@ -391,32 +391,32 @@ RTLIB::Libcall RTLIB::getSINTTOFP(EVT OpVT, EVT RetVT) {
/// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getUINTTOFP(EVT OpVT, EVT RetVT) {
if (OpVT == EVT::i32) {
if (RetVT == EVT::f32)
if (OpVT == MVT::i32) {
if (RetVT == MVT::f32)
return UINTTOFP_I32_F32;
else if (RetVT == EVT::f64)
else if (RetVT == MVT::f64)
return UINTTOFP_I32_F64;
else if (RetVT == EVT::f80)
else if (RetVT == MVT::f80)
return UINTTOFP_I32_F80;
else if (RetVT == EVT::ppcf128)
else if (RetVT == MVT::ppcf128)
return UINTTOFP_I32_PPCF128;
} else if (OpVT == EVT::i64) {
if (RetVT == EVT::f32)
} else if (OpVT == MVT::i64) {
if (RetVT == MVT::f32)
return UINTTOFP_I64_F32;
else if (RetVT == EVT::f64)
else if (RetVT == MVT::f64)
return UINTTOFP_I64_F64;
else if (RetVT == EVT::f80)
else if (RetVT == MVT::f80)
return UINTTOFP_I64_F80;
else if (RetVT == EVT::ppcf128)
else if (RetVT == MVT::ppcf128)
return UINTTOFP_I64_PPCF128;
} else if (OpVT == EVT::i128) {
if (RetVT == EVT::f32)
} else if (OpVT == MVT::i128) {
if (RetVT == MVT::f32)
return UINTTOFP_I128_F32;
else if (RetVT == EVT::f64)
else if (RetVT == MVT::f64)
return UINTTOFP_I128_F64;
else if (RetVT == EVT::f80)
else if (RetVT == MVT::f80)
return UINTTOFP_I128_F80;
else if (RetVT == EVT::ppcf128)
else if (RetVT == MVT::ppcf128)
return UINTTOFP_I128_PPCF128;
}
return UNKNOWN_LIBCALL;
@ -456,48 +456,49 @@ TargetLowering::TargetLowering(TargetMachine &tm,TargetLoweringObjectFile *tlof)
memset(CondCodeActions, 0, sizeof(CondCodeActions));
// Set default actions for various operations.
for (unsigned VT = 0; VT != (unsigned)EVT::LAST_VALUETYPE; ++VT) {
for (unsigned VT = 0; VT != (unsigned)MVT::LAST_VALUETYPE; ++VT) {
// Default all indexed load / store to expand.
for (unsigned IM = (unsigned)ISD::PRE_INC;
IM != (unsigned)ISD::LAST_INDEXED_MODE; ++IM) {
setIndexedLoadAction(IM, (EVT::SimpleValueType)VT, Expand);
setIndexedStoreAction(IM, (EVT::SimpleValueType)VT, Expand);
setIndexedLoadAction(IM, (MVT::SimpleValueType)VT, Expand);
setIndexedStoreAction(IM, (MVT::SimpleValueType)VT, Expand);
}
// These operations default to expand.
setOperationAction(ISD::FGETSIGN, (EVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::CONCAT_VECTORS, (EVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::FGETSIGN, (MVT::SimpleValueType)VT, Expand);
setOperationAction(ISD::CONCAT_VECTORS, (MVT::SimpleValueType)VT, Expand);
}
// Most targets ignore the @llvm.prefetch intrinsic.
setOperationAction(ISD::PREFETCH, EVT::Other, Expand);
setOperationAction(ISD::PREFETCH, MVT::Other, Expand);
// ConstantFP nodes default to expand. Targets can either change this to
// Legal, in which case all fp constants are legal, or use addLegalFPImmediate
// to optimize expansions for certain constants.
setOperationAction(ISD::ConstantFP, EVT::f32, Expand);
setOperationAction(ISD::ConstantFP, EVT::f64, Expand);
setOperationAction(ISD::ConstantFP, EVT::f80, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
// These library functions default to expand.
setOperationAction(ISD::FLOG , EVT::f64, Expand);
setOperationAction(ISD::FLOG2, EVT::f64, Expand);
setOperationAction(ISD::FLOG10,EVT::f64, Expand);
setOperationAction(ISD::FEXP , EVT::f64, Expand);
setOperationAction(ISD::FEXP2, EVT::f64, Expand);
setOperationAction(ISD::FLOG , EVT::f32, Expand);
setOperationAction(ISD::FLOG2, EVT::f32, Expand);
setOperationAction(ISD::FLOG10,EVT::f32, Expand);
setOperationAction(ISD::FEXP , EVT::f32, Expand);
setOperationAction(ISD::FEXP2, EVT::f32, Expand);
setOperationAction(ISD::FLOG , MVT::f64, Expand);
setOperationAction(ISD::FLOG2, MVT::f64, Expand);
setOperationAction(ISD::FLOG10,MVT::f64, Expand);
setOperationAction(ISD::FEXP , MVT::f64, Expand);
setOperationAction(ISD::FEXP2, MVT::f64, Expand);
setOperationAction(ISD::FLOG , MVT::f32, Expand);
setOperationAction(ISD::FLOG2, MVT::f32, Expand);
setOperationAction(ISD::FLOG10,MVT::f32, Expand);
setOperationAction(ISD::FEXP , MVT::f32, Expand);
setOperationAction(ISD::FEXP2, MVT::f32, Expand);
// Default ISD::TRAP to expand (which turns it into abort).
setOperationAction(ISD::TRAP, EVT::Other, Expand);
setOperationAction(ISD::TRAP, MVT::Other, Expand);
IsLittleEndian = TD->isLittleEndian();
UsesGlobalOffsetTable = false;
ShiftAmountTy = PointerTy = getValueType(TD->getIntPtrType()).getSimpleVT();
memset(RegClassForVT, 0,EVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
ShiftAmountTy = PointerTy =
getValueType(TD->getIntPtrType()).getSimpleVT().SimpleTy;
memset(RegClassForVT, 0,MVT::LAST_VALUETYPE*sizeof(TargetRegisterClass*));
memset(TargetDAGCombineArray, 0, array_lengthof(TargetDAGCombineArray));
maxStoresPerMemset = maxStoresPerMemcpy = maxStoresPerMemmove = 8;
allowUnalignedMemoryAccesses = false;
@ -524,7 +525,7 @@ TargetLowering::TargetLowering(TargetMachine &tm,TargetLoweringObjectFile *tlof)
// Tell Legalize whether the assembler supports DEBUG_LOC.
const TargetAsmInfo *TASM = TM.getTargetAsmInfo();
if (!TASM || !TASM->hasDotLocAndDotFile())
setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
}
TargetLowering::~TargetLowering() {
@ -534,31 +535,31 @@ TargetLowering::~TargetLowering() {
/// computeRegisterProperties - Once all of the register classes are added,
/// this allows us to compute derived properties we expose.
void TargetLowering::computeRegisterProperties() {
assert(EVT::LAST_VALUETYPE <= EVT::MAX_ALLOWED_VALUETYPE &&
assert(MVT::LAST_VALUETYPE <= MVT::MAX_ALLOWED_VALUETYPE &&
"Too many value types for ValueTypeActions to hold!");
// Everything defaults to needing one register.
for (unsigned i = 0; i != EVT::LAST_VALUETYPE; ++i) {
for (unsigned i = 0; i != MVT::LAST_VALUETYPE; ++i) {
NumRegistersForVT[i] = 1;
RegisterTypeForVT[i] = TransformToType[i] = (EVT::SimpleValueType)i;
RegisterTypeForVT[i] = TransformToType[i] = (MVT::SimpleValueType)i;
}
// ...except isVoid, which doesn't need any registers.
NumRegistersForVT[EVT::isVoid] = 0;
NumRegistersForVT[MVT::isVoid] = 0;
// Find the largest integer register class.
unsigned LargestIntReg = EVT::LAST_INTEGER_VALUETYPE;
unsigned LargestIntReg = MVT::LAST_INTEGER_VALUETYPE;
for (; RegClassForVT[LargestIntReg] == 0; --LargestIntReg)
assert(LargestIntReg != EVT::i1 && "No integer registers defined!");
assert(LargestIntReg != MVT::i1 && "No integer registers defined!");
// Every integer value type larger than this largest register takes twice as
// many registers to represent as the previous ValueType.
for (unsigned ExpandedReg = LargestIntReg + 1; ; ++ExpandedReg) {
EVT EVT = (EVT::SimpleValueType)ExpandedReg;
EVT EVT = (MVT::SimpleValueType)ExpandedReg;
if (!EVT.isInteger())
break;
NumRegistersForVT[ExpandedReg] = 2*NumRegistersForVT[ExpandedReg-1];
RegisterTypeForVT[ExpandedReg] = (EVT::SimpleValueType)LargestIntReg;
TransformToType[ExpandedReg] = (EVT::SimpleValueType)(ExpandedReg - 1);
RegisterTypeForVT[ExpandedReg] = (MVT::SimpleValueType)LargestIntReg;
TransformToType[ExpandedReg] = (MVT::SimpleValueType)(ExpandedReg - 1);
ValueTypeActions.setTypeAction(EVT, Expand);
}
@ -566,54 +567,54 @@ void TargetLowering::computeRegisterProperties() {
// register to see which ones need promotion.
unsigned LegalIntReg = LargestIntReg;
for (unsigned IntReg = LargestIntReg - 1;
IntReg >= (unsigned)EVT::i1; --IntReg) {
EVT IVT = (EVT::SimpleValueType)IntReg;
IntReg >= (unsigned)MVT::i1; --IntReg) {
EVT IVT = (MVT::SimpleValueType)IntReg;
if (isTypeLegal(IVT)) {
LegalIntReg = IntReg;
} else {
RegisterTypeForVT[IntReg] = TransformToType[IntReg] =
(EVT::SimpleValueType)LegalIntReg;
(MVT::SimpleValueType)LegalIntReg;
ValueTypeActions.setTypeAction(IVT, Promote);
}
}
// ppcf128 type is really two f64's.
if (!isTypeLegal(EVT::ppcf128)) {
NumRegistersForVT[EVT::ppcf128] = 2*NumRegistersForVT[EVT::f64];
RegisterTypeForVT[EVT::ppcf128] = EVT::f64;
TransformToType[EVT::ppcf128] = EVT::f64;
ValueTypeActions.setTypeAction(EVT::ppcf128, Expand);
if (!isTypeLegal(MVT::ppcf128)) {
NumRegistersForVT[MVT::ppcf128] = 2*NumRegistersForVT[MVT::f64];
RegisterTypeForVT[MVT::ppcf128] = MVT::f64;
TransformToType[MVT::ppcf128] = MVT::f64;
ValueTypeActions.setTypeAction(MVT::ppcf128, Expand);
}
// Decide how to handle f64. If the target does not have native f64 support,
// expand it to i64 and we will be generating soft float library calls.
if (!isTypeLegal(EVT::f64)) {
NumRegistersForVT[EVT::f64] = NumRegistersForVT[EVT::i64];
RegisterTypeForVT[EVT::f64] = RegisterTypeForVT[EVT::i64];
TransformToType[EVT::f64] = EVT::i64;
ValueTypeActions.setTypeAction(EVT::f64, Expand);
if (!isTypeLegal(MVT::f64)) {
NumRegistersForVT[MVT::f64] = NumRegistersForVT[MVT::i64];
RegisterTypeForVT[MVT::f64] = RegisterTypeForVT[MVT::i64];
TransformToType[MVT::f64] = MVT::i64;
ValueTypeActions.setTypeAction(MVT::f64, Expand);
}
// Decide how to handle f32. If the target does not have native support for
// f32, promote it to f64 if it is legal. Otherwise, expand it to i32.
if (!isTypeLegal(EVT::f32)) {
if (isTypeLegal(EVT::f64)) {
NumRegistersForVT[EVT::f32] = NumRegistersForVT[EVT::f64];
RegisterTypeForVT[EVT::f32] = RegisterTypeForVT[EVT::f64];
TransformToType[EVT::f32] = EVT::f64;
ValueTypeActions.setTypeAction(EVT::f32, Promote);
if (!isTypeLegal(MVT::f32)) {
if (isTypeLegal(MVT::f64)) {
NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::f64];
RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::f64];
TransformToType[MVT::f32] = MVT::f64;
ValueTypeActions.setTypeAction(MVT::f32, Promote);
} else {
NumRegistersForVT[EVT::f32] = NumRegistersForVT[EVT::i32];
RegisterTypeForVT[EVT::f32] = RegisterTypeForVT[EVT::i32];
TransformToType[EVT::f32] = EVT::i32;
ValueTypeActions.setTypeAction(EVT::f32, Expand);
NumRegistersForVT[MVT::f32] = NumRegistersForVT[MVT::i32];
RegisterTypeForVT[MVT::f32] = RegisterTypeForVT[MVT::i32];
TransformToType[MVT::f32] = MVT::i32;
ValueTypeActions.setTypeAction(MVT::f32, Expand);
}
}
// Loop over all of the vector value types to see which need transformations.
for (unsigned i = EVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)EVT::LAST_VECTOR_VALUETYPE; ++i) {
EVT VT = (EVT::SimpleValueType)i;
for (unsigned i = MVT::FIRST_VECTOR_VALUETYPE;
i <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++i) {
EVT VT = (MVT::SimpleValueType)i;
if (!isTypeLegal(VT)) {
EVT IntermediateVT, RegisterVT;
unsigned NumIntermediates;
@ -627,8 +628,8 @@ void TargetLowering::computeRegisterProperties() {
bool IsLegalWiderType = false;
EVT EltVT = VT.getVectorElementType();
unsigned NElts = VT.getVectorNumElements();
for (unsigned nVT = i+1; nVT <= EVT::LAST_VECTOR_VALUETYPE; ++nVT) {
EVT SVT = (EVT::SimpleValueType)nVT;
for (unsigned nVT = i+1; nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
EVT SVT = (MVT::SimpleValueType)nVT;
if (isTypeLegal(SVT) && SVT.getVectorElementType() == EltVT &&
SVT.getVectorNumElements() > NElts) {
TransformToType[i] = SVT;
@ -641,7 +642,7 @@ void TargetLowering::computeRegisterProperties() {
EVT NVT = VT.getPow2VectorType();
if (NVT == VT) {
// Type is already a power of 2. The default action is to split.
TransformToType[i] = EVT::Other;
TransformToType[i] = MVT::Other;
ValueTypeActions.setTypeAction(VT, Expand);
} else {
TransformToType[i] = NVT;
@ -657,15 +658,15 @@ const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
}
EVT::SimpleValueType TargetLowering::getSetCCResultType(EVT VT) const {
return getValueType(TD->getIntPtrType()).getSimpleVT();
MVT::SimpleValueType TargetLowering::getSetCCResultType(EVT VT) const {
return getValueType(TD->getIntPtrType()).getSimpleVT().SimpleTy;
}
/// getVectorTypeBreakdown - Vector types are broken down into some number of
/// legal first class types. For example, EVT::v8f32 maps to 2 EVT::v4f32
/// with Altivec or SSE1, or 8 promoted EVT::f64 values with the X86 FP stack.
/// Similarly, EVT::v2i64 turns into 4 EVT::i32 values with both PPC and X86.
/// legal first class types. For example, MVT::v8f32 maps to 2 MVT::v4f32
/// with Altivec or SSE1, or 8 promoted MVT::f64 values with the X86 FP stack.
/// Similarly, MVT::v2i64 turns into 4 MVT::i32 values with both PPC and X86.
///
/// This method returns the number of registers needed, and the VT for each
/// register. It also returns the VT and quantity of the intermediate values
@ -718,7 +719,7 @@ unsigned TargetLowering::getVectorTypeBreakdown(EVT VT,
/// getWidenVectorType: given a vector type, returns the type to widen to
/// (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
/// If there is no vector type that we want to widen to, returns EVT::Other
/// If there is no vector type that we want to widen to, returns MVT::Other
/// When and where to widen is target dependent based on the cost of
/// scalarizing vs using the wider vector type.
EVT TargetLowering::getWidenVectorType(EVT VT) const {
@ -727,7 +728,7 @@ EVT TargetLowering::getWidenVectorType(EVT VT) const {
return VT;
// Default is not to widen until moved to LegalizeTypes
return EVT::Other;
return MVT::Other;
}
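A minimal sketch, not part of the patch, of the breakdown arithmetic described in the getVectorTypeBreakdown comment above (assuming v4f32 is the widest legal f32 vector, as with Altivec or SSE1):

  #include <cassert>

  int main() {
    unsigned WideElts  = 8;                            // v8f32 to be legalized
    unsigned LegalElts = 4;                            // v4f32 is legal
    unsigned NumIntermediates = WideElts / LegalElts;
    assert(NumIntermediates == 2);                     // 2 x v4f32 registers
    return 0;
  }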
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
@ -1393,8 +1394,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// If this is an FP->Int bitcast and if the sign bit is the only thing that
// is demanded, turn this into a FGETSIGN.
if (NewMask == EVT::getIntegerVTSignBit(Op.getValueType()) &&
EVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
!EVT::isVector(Op.getOperand(0).getValueType())) {
MVT::isFloatingPoint(Op.getOperand(0).getValueType()) &&
!MVT::isVector(Op.getOperand(0).getValueType())) {
// Only do this xform if FGETSIGN is valid or if before legalize.
if (!TLO.AfterLegalize ||
isOperationLegal(ISD::FGETSIGN, Op.getValueType())) {
@ -2010,46 +2011,46 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
// Fold away ALL boolean setcc's.
SDValue Temp;
if (N0.getValueType() == EVT::i1 && foldBooleans) {
if (N0.getValueType() == MVT::i1 && foldBooleans) {
switch (Cond) {
default: llvm_unreachable("Unknown integer setcc!");
case ISD::SETEQ: // X == Y -> ~(X^Y)
Temp = DAG.getNode(ISD::XOR, dl, EVT::i1, N0, N1);
N0 = DAG.getNOT(dl, Temp, EVT::i1);
Temp = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
N0 = DAG.getNOT(dl, Temp, MVT::i1);
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETNE: // X != Y --> (X^Y)
N0 = DAG.getNode(ISD::XOR, dl, EVT::i1, N0, N1);
N0 = DAG.getNode(ISD::XOR, dl, MVT::i1, N0, N1);
break;
case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y
case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y
Temp = DAG.getNOT(dl, N0, EVT::i1);
N0 = DAG.getNode(ISD::AND, dl, EVT::i1, N1, Temp);
Temp = DAG.getNOT(dl, N0, MVT::i1);
N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N1, Temp);
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X
case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X
Temp = DAG.getNOT(dl, N1, EVT::i1);
N0 = DAG.getNode(ISD::AND, dl, EVT::i1, N0, Temp);
Temp = DAG.getNOT(dl, N1, MVT::i1);
N0 = DAG.getNode(ISD::AND, dl, MVT::i1, N0, Temp);
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y
case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y
Temp = DAG.getNOT(dl, N0, EVT::i1);
N0 = DAG.getNode(ISD::OR, dl, EVT::i1, N1, Temp);
Temp = DAG.getNOT(dl, N0, MVT::i1);
N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N1, Temp);
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(Temp.getNode());
break;
case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X
case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X
Temp = DAG.getNOT(dl, N1, EVT::i1);
N0 = DAG.getNode(ISD::OR, dl, EVT::i1, N0, Temp);
Temp = DAG.getNOT(dl, N1, MVT::i1);
N0 = DAG.getNode(ISD::OR, dl, MVT::i1, N0, Temp);
break;
}
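An aside, not part of the patch: a quick truth-table check of the unsigned i1 folds in the switch above, using plain ints 0/1 to stand in for i1 values.

  #include <cassert>

  int main() {
    for (int X = 0; X <= 1; ++X)
      for (int Y = 0; Y <= 1; ++Y) {
        assert((X == Y) == ((~(X ^ Y)) & 1));   // SETEQ:  ~(X^Y)
        assert((X != Y) == ((X ^ Y) & 1));      // SETNE:  X^Y
        assert((X <  Y) == ((~X & Y) & 1));     // SETULT: ~X & Y
        assert((X >  Y) == ((~Y & X) & 1));     // SETUGT: ~Y & X
        assert((X <= Y) == ((~X | Y) & 1));     // SETULE: ~X | Y
        assert((X >= Y) == ((~Y | X) & 1));     // SETUGE: ~Y | X
      }
    return 0;
  }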
if (VT != EVT::i1) {
if (VT != MVT::i1) {
if (!DCI.isCalledByLegalizer())
DCI.AddToWorklist(N0.getNode());
// FIXME: If running after legalize, we probably can't do this.
@ -2245,7 +2246,7 @@ void TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
// now; without this it would get ZExt'd later in
// ScheduleDAGSDNodes::EmitNode, which is very generic.
Ops.push_back(DAG.getTargetConstant(C->getAPIntValue().getSExtValue(),
EVT::i64));
MVT::i64));
return;
}
}

View File

@ -63,7 +63,7 @@ public:
/// getI32Imm - Return a target constant with the specified value, of type i32.
inline SDValue getI32Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i32);
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
SDNode *Select(SDValue Op);
@ -156,13 +156,13 @@ bool ARMDAGToDAGISel::SelectShifterOperandReg(SDValue Op,
BaseReg = N.getOperand(0);
unsigned ShImmVal = 0;
if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
ShReg = CurDAG->getRegister(0, EVT::i32);
ShReg = CurDAG->getRegister(0, MVT::i32);
ShImmVal = RHS->getZExtValue() & 31;
} else {
ShReg = N.getOperand(1);
}
Opc = CurDAG->getTargetConstant(ARM_AM::getSORegOpc(ShOpcVal, ShImmVal),
EVT::i32);
MVT::i32);
return true;
}
@ -185,7 +185,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
Base = Offset = N.getOperand(0);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt,
ARM_AM::lsl),
EVT::i32);
MVT::i32);
return true;
}
}
@ -200,10 +200,10 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
} else if (N.getOpcode() == ARMISD::Wrapper) {
Base = N.getOperand(0);
}
Offset = CurDAG->getRegister(0, EVT::i32);
Offset = CurDAG->getRegister(0, MVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
ARM_AM::no_shift),
EVT::i32);
MVT::i32);
return true;
}
@ -218,7 +218,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
}
Offset = CurDAG->getRegister(0, EVT::i32);
Offset = CurDAG->getRegister(0, MVT::i32);
ARM_AM::AddrOpc AddSub = ARM_AM::add;
if (RHSC < 0) {
@ -227,7 +227,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
}
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, RHSC,
ARM_AM::no_shift),
EVT::i32);
MVT::i32);
return true;
}
}
@ -270,7 +270,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2(SDValue Op, SDValue N,
}
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
EVT::i32);
MVT::i32);
return true;
}
@ -285,10 +285,10 @@ bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDValue Op, SDValue N,
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
int Val = (int)C->getZExtValue();
if (Val >= 0 && Val < 0x1000) { // 12 bits.
Offset = CurDAG->getRegister(0, EVT::i32);
Offset = CurDAG->getRegister(0, MVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
ARM_AM::no_shift),
EVT::i32);
MVT::i32);
return true;
}
}
@ -308,7 +308,7 @@ bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDValue Op, SDValue N,
}
Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
EVT::i32);
MVT::i32);
return true;
}
@ -320,7 +320,7 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
// X - C is canonicalized to X + -C, no need to handle it here.
Base = N.getOperand(0);
Offset = N.getOperand(1);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),EVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
return true;
}
@ -330,8 +330,8 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
int FI = cast<FrameIndexSDNode>(N)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
}
Offset = CurDAG->getRegister(0, EVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),EVT::i32);
Offset = CurDAG->getRegister(0, MVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
return true;
}
@ -345,21 +345,21 @@ bool ARMDAGToDAGISel::SelectAddrMode3(SDValue Op, SDValue N,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
}
Offset = CurDAG->getRegister(0, EVT::i32);
Offset = CurDAG->getRegister(0, MVT::i32);
ARM_AM::AddrOpc AddSub = ARM_AM::add;
if (RHSC < 0) {
AddSub = ARM_AM::sub;
RHSC = - RHSC;
}
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),EVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, RHSC),MVT::i32);
return true;
}
}
Base = N.getOperand(0);
Offset = N.getOperand(1);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), EVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
return true;
}
@ -374,21 +374,21 @@ bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDValue Op, SDValue N,
if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
int Val = (int)C->getZExtValue();
if (Val >= 0 && Val < 256) {
Offset = CurDAG->getRegister(0, EVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), EVT::i32);
Offset = CurDAG->getRegister(0, MVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
return true;
}
}
Offset = N;
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), EVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
return true;
}
bool ARMDAGToDAGISel::SelectAddrMode4(SDValue Op, SDValue N,
SDValue &Addr, SDValue &Mode) {
Addr = N;
Mode = CurDAG->getTargetConstant(0, EVT::i32);
Mode = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -403,7 +403,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
Base = N.getOperand(0);
}
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
EVT::i32);
MVT::i32);
return true;
}
@ -426,7 +426,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
RHSC = - RHSC;
}
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(AddSub, RHSC),
EVT::i32);
MVT::i32);
return true;
}
}
@ -434,7 +434,7 @@ bool ARMDAGToDAGISel::SelectAddrMode5(SDValue Op, SDValue N,
Base = N;
Offset = CurDAG->getTargetConstant(ARM_AM::getAM5Opc(ARM_AM::add, 0),
EVT::i32);
MVT::i32);
return true;
}
@ -443,8 +443,8 @@ bool ARMDAGToDAGISel::SelectAddrMode6(SDValue Op, SDValue N,
SDValue &Opc) {
Addr = N;
// The optional writeback is handled in ARMLoadStoreOpt.
Update = CurDAG->getRegister(0, EVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(false), EVT::i32);
Update = CurDAG->getRegister(0, MVT::i32);
Opc = CurDAG->getTargetConstant(ARM_AM::getAM6Opc(false), MVT::i32);
return true;
}
@ -454,7 +454,7 @@ bool ARMDAGToDAGISel::SelectAddrModePC(SDValue Op, SDValue N,
Offset = N.getOperand(0);
SDValue N1 = N.getOperand(1);
Label = CurDAG->getTargetConstant(cast<ConstantSDNode>(N1)->getZExtValue(),
EVT::i32);
MVT::i32);
return true;
}
return false;
@ -493,8 +493,8 @@ ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDValue Op, SDValue N,
if (N.getOpcode() != ISD::ADD) {
Base = (N.getOpcode() == ARMISD::Wrapper) ? N.getOperand(0) : N;
Offset = CurDAG->getRegister(0, EVT::i32);
OffImm = CurDAG->getTargetConstant(0, EVT::i32);
Offset = CurDAG->getRegister(0, MVT::i32);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -504,8 +504,8 @@ ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDValue Op, SDValue N,
if ((LHSR && LHSR->getReg() == ARM::SP) ||
(RHSR && RHSR->getReg() == ARM::SP)) {
Base = N;
Offset = CurDAG->getRegister(0, EVT::i32);
OffImm = CurDAG->getTargetConstant(0, EVT::i32);
Offset = CurDAG->getRegister(0, MVT::i32);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -516,8 +516,8 @@ ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDValue Op, SDValue N,
RHSC /= Scale;
if (RHSC >= 0 && RHSC < 32) {
Base = N.getOperand(0);
Offset = CurDAG->getRegister(0, EVT::i32);
OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
Offset = CurDAG->getRegister(0, MVT::i32);
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
}
}
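A small aside, not from the patch (the access size and offset are hypothetical): the scaled 5-bit offset check above, for a word access where Scale is 4.

  #include <cassert>

  int main() {
    int Scale = 4;                   // word-sized access (assumption)
    int ByteOffset = 124;            // hypothetical offset, a multiple of Scale
    int Encoded = ByteOffset / Scale;
    assert(Encoded >= 0 && Encoded < 32);   // fits the 5-bit immediate field
    return 0;
  }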
@ -525,7 +525,7 @@ ARMDAGToDAGISel::SelectThumbAddrModeRI5(SDValue Op, SDValue N,
Base = N.getOperand(0);
Offset = N.getOperand(1);
OffImm = CurDAG->getTargetConstant(0, EVT::i32);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -552,7 +552,7 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N,
if (N.getOpcode() == ISD::FrameIndex) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
OffImm = CurDAG->getTargetConstant(0, EVT::i32);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -573,7 +573,7 @@ bool ARMDAGToDAGISel::SelectThumbAddrModeSP(SDValue Op, SDValue N,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
}
OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
}
}
@ -613,7 +613,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue Op, SDValue N,
// Match frame index...
int FI = cast<FrameIndexSDNode>(N)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
OffImm = CurDAG->getTargetConstant(0, EVT::i32);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
} else if (N.getOpcode() == ARMISD::Wrapper) {
Base = N.getOperand(0);
@ -621,7 +621,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue Op, SDValue N,
return false; // We want to select t2LDRpci instead.
} else
Base = N;
OffImm = CurDAG->getTargetConstant(0, EVT::i32);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -640,14 +640,14 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm12(SDValue Op, SDValue N,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
}
OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
}
}
// Base only.
Base = N;
OffImm = CurDAG->getTargetConstant(0, EVT::i32);
OffImm = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -666,7 +666,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8(SDValue Op, SDValue N,
int FI = cast<FrameIndexSDNode>(Base)->getIndex();
Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
}
OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
}
}
@ -685,8 +685,8 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8Offset(SDValue Op, SDValue N,
int RHSC = (int)RHS->getZExtValue();
if (RHSC >= 0 && RHSC < 0x100) { // 8 bits.
OffImm = ((AM == ISD::PRE_INC) || (AM == ISD::POST_INC))
? CurDAG->getTargetConstant(RHSC, EVT::i32)
: CurDAG->getTargetConstant(-RHSC, EVT::i32);
? CurDAG->getTargetConstant(RHSC, MVT::i32)
: CurDAG->getTargetConstant(-RHSC, MVT::i32);
return true;
}
}
@ -702,7 +702,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDValue Op, SDValue N,
if (((RHSC & 0x3) == 0) &&
((RHSC >= 0 && RHSC < 0x400) || (RHSC < 0 && RHSC > -0x400))) { // 8 bits.
Base = N.getOperand(0);
OffImm = CurDAG->getTargetConstant(RHSC, EVT::i32);
OffImm = CurDAG->getTargetConstant(RHSC, MVT::i32);
return true;
}
}
@ -711,7 +711,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeImm8s4(SDValue Op, SDValue N,
int RHSC = (int)RHS->getZExtValue();
if (((RHSC & 0x3) == 0) && (RHSC >= 0 && RHSC < 0x400)) { // 8 bits.
Base = N.getOperand(0);
OffImm = CurDAG->getTargetConstant(-RHSC, EVT::i32);
OffImm = CurDAG->getTargetConstant(-RHSC, MVT::i32);
return true;
}
}
@ -764,7 +764,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N,
}
}
ShImm = CurDAG->getTargetConstant(ShAmt, EVT::i32);
ShImm = CurDAG->getTargetConstant(ShAmt, MVT::i32);
return true;
}
@ -773,7 +773,7 @@ bool ARMDAGToDAGISel::SelectT2AddrModeSoReg(SDValue Op, SDValue N,
/// getAL - Returns an ARMCC::AL immediate node.
static inline SDValue getAL(SelectionDAG *CurDAG) {
return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, EVT::i32);
return CurDAG->getTargetConstant((uint64_t)ARMCC::AL, MVT::i32);
}
SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDValue Op) {
@ -787,17 +787,17 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDValue Op) {
bool isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
unsigned Opcode = 0;
bool Match = false;
if (LoadedVT == EVT::i32 &&
if (LoadedVT == MVT::i32 &&
SelectAddrMode2Offset(Op, LD->getOffset(), Offset, AMOpc)) {
Opcode = isPre ? ARM::LDR_PRE : ARM::LDR_POST;
Match = true;
} else if (LoadedVT == EVT::i16 &&
} else if (LoadedVT == MVT::i16 &&
SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
Match = true;
Opcode = (LD->getExtensionType() == ISD::SEXTLOAD)
? (isPre ? ARM::LDRSH_PRE : ARM::LDRSH_POST)
: (isPre ? ARM::LDRH_PRE : ARM::LDRH_POST);
} else if (LoadedVT == EVT::i8 || LoadedVT == EVT::i1) {
} else if (LoadedVT == MVT::i8 || LoadedVT == MVT::i1) {
if (LD->getExtensionType() == ISD::SEXTLOAD) {
if (SelectAddrMode3Offset(Op, LD->getOffset(), Offset, AMOpc)) {
Match = true;
@ -815,9 +815,9 @@ SDNode *ARMDAGToDAGISel::SelectARMIndexedLoad(SDValue Op) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[]= { Base, Offset, AMOpc, getAL(CurDAG),
CurDAG->getRegister(0, EVT::i32), Chain };
return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), EVT::i32, EVT::i32,
EVT::Other, Ops, 6);
CurDAG->getRegister(0, MVT::i32), Chain };
return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), MVT::i32, MVT::i32,
MVT::Other, Ops, 6);
}
return NULL;
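An aside, not part of the patch: a plain C++ picture of the distinction the isPre flag above encodes. Pre-indexed loads update the base register before the access; post-indexed loads use the old base and update it afterwards.

  #include <cassert>

  int main() {
    int Mem[4] = {10, 20, 30, 40};
    int *Base = Mem;
    int Pre = *(++Base);       // pre-indexed: base advances, then load -> 20
    Base = Mem;
    int Post = *(Base++);      // post-indexed: load, then base advances -> 10
    assert(Pre == 20 && Post == 10 && Base == Mem + 1);
    return 0;
  }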
@ -836,18 +836,18 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDValue Op) {
unsigned Opcode = 0;
bool Match = false;
if (SelectT2AddrModeImm8Offset(Op, LD->getOffset(), Offset)) {
switch (LoadedVT.getSimpleVT()) {
case EVT::i32:
switch (LoadedVT.getSimpleVT().SimpleTy) {
case MVT::i32:
Opcode = isPre ? ARM::t2LDR_PRE : ARM::t2LDR_POST;
break;
case EVT::i16:
case MVT::i16:
if (isSExtLd)
Opcode = isPre ? ARM::t2LDRSH_PRE : ARM::t2LDRSH_POST;
else
Opcode = isPre ? ARM::t2LDRH_PRE : ARM::t2LDRH_POST;
break;
case EVT::i8:
case EVT::i1:
case MVT::i8:
case MVT::i1:
if (isSExtLd)
Opcode = isPre ? ARM::t2LDRSB_PRE : ARM::t2LDRSB_POST;
else
@ -863,9 +863,9 @@ SDNode *ARMDAGToDAGISel::SelectT2IndexedLoad(SDValue Op) {
SDValue Chain = LD->getChain();
SDValue Base = LD->getBasePtr();
SDValue Ops[]= { Base, Offset, getAL(CurDAG),
CurDAG->getRegister(0, EVT::i32), Chain };
return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), EVT::i32, EVT::i32,
EVT::Other, Ops, 5);
CurDAG->getRegister(0, MVT::i32), Chain };
return CurDAG->getTargetNode(Opcode, Op.getDebugLoc(), MVT::i32, MVT::i32,
MVT::Other, Ops, 5);
}
return NULL;
@ -878,7 +878,7 @@ SDNode *ARMDAGToDAGISel::SelectDYN_ALLOC(SDValue Op) {
SDValue Chain = Op.getOperand(0);
SDValue Size = Op.getOperand(1);
SDValue Align = Op.getOperand(2);
SDValue SP = CurDAG->getRegister(ARM::SP, EVT::i32);
SDValue SP = CurDAG->getRegister(ARM::SP, MVT::i32);
int32_t AlignVal = cast<ConstantSDNode>(Align)->getSExtValue();
if (AlignVal < 0)
// We need to align the stack. Use Thumb1 tAND which is the only thumb
@ -893,8 +893,8 @@ SDNode *ARMDAGToDAGISel::SelectDYN_ALLOC(SDValue Op) {
// tSUBspi - immediate is between 0 ... 508 inclusive.
if (C <= 508 && ((C & 3) == 0))
// FIXME: tSUBspi encode scale 4 implicitly.
return CurDAG->SelectNodeTo(N, ARM::tSUBspi_, VT, EVT::Other, SP,
CurDAG->getTargetConstant(C/4, EVT::i32),
return CurDAG->SelectNodeTo(N, ARM::tSUBspi_, VT, MVT::Other, SP,
CurDAG->getTargetConstant(C/4, MVT::i32),
Chain);
if (Subtarget->isThumb1Only()) {
@ -902,22 +902,22 @@ SDNode *ARMDAGToDAGISel::SelectDYN_ALLOC(SDValue Op) {
// should have negated the size operand already. FIXME: We can't insert
// new target independent node at this stage so we are forced to negate
// it earlier. Is there a better solution?
return CurDAG->SelectNodeTo(N, ARM::tADDspr_, VT, EVT::Other, SP, Size,
return CurDAG->SelectNodeTo(N, ARM::tADDspr_, VT, MVT::Other, SP, Size,
Chain);
} else if (Subtarget->isThumb2()) {
if (isC && Predicate_t2_so_imm(Size.getNode())) {
// t2SUBrSPi
SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, EVT::i32), Chain };
return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi_, VT, EVT::Other, Ops, 3);
SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, MVT::i32), Chain };
return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi_, VT, MVT::Other, Ops, 3);
} else if (isC && Predicate_imm0_4095(Size.getNode())) {
// t2SUBrSPi12
SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, EVT::i32), Chain };
return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi12_, VT, EVT::Other, Ops, 3);
SDValue Ops[] = { SP, CurDAG->getTargetConstant(C, MVT::i32), Chain };
return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPi12_, VT, MVT::Other, Ops, 3);
} else {
// t2SUBrSPs
SDValue Ops[] = { SP, Size,
getI32Imm(ARM_AM::getSORegOpc(ARM_AM::lsl,0)), Chain };
return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPs_, VT, EVT::Other, Ops, 4);
return CurDAG->SelectNodeTo(N, ARM::t2SUBrSPs_, VT, MVT::Other, Ops, 4);
}
}
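An aside, not from the patch (the frame adjustment is hypothetical): the scale-by-4 encoding the tSUBspi FIXME above refers to.

  #include <cassert>

  int main() {
    unsigned Bytes = 64;                      // hypothetical stack adjustment
    assert(Bytes <= 508 && (Bytes & 3) == 0);
    unsigned Encoded = Bytes / 4;             // value carried by the node above
    assert(Encoded == 16);
    return 0;
  }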
@ -957,21 +957,21 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
SDNode *ResNode;
if (Subtarget->isThumb1Only()) {
SDValue Pred = CurDAG->getTargetConstant(0xEULL, EVT::i32);
SDValue PredReg = CurDAG->getRegister(0, EVT::i32);
SDValue Pred = CurDAG->getTargetConstant(0xEULL, MVT::i32);
SDValue PredReg = CurDAG->getRegister(0, MVT::i32);
SDValue Ops[] = { CPIdx, Pred, PredReg, CurDAG->getEntryNode() };
ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, EVT::i32, EVT::Other,
ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
Ops, 4);
} else {
SDValue Ops[] = {
CPIdx,
CurDAG->getRegister(0, EVT::i32),
CurDAG->getTargetConstant(0, EVT::i32),
CurDAG->getRegister(0, MVT::i32),
CurDAG->getTargetConstant(0, MVT::i32),
getAL(CurDAG),
CurDAG->getRegister(0, EVT::i32),
CurDAG->getRegister(0, MVT::i32),
CurDAG->getEntryNode()
};
ResNode=CurDAG->getTargetNode(ARM::LDRcp, dl, EVT::i32, EVT::Other,
ResNode=CurDAG->getTargetNode(ARM::LDRcp, dl, MVT::i32, MVT::Other,
Ops, 6);
}
ReplaceUses(Op, SDValue(ResNode, 0));
@ -986,15 +986,15 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
if (Subtarget->isThumb1Only()) {
return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, EVT::i32, TFI,
CurDAG->getTargetConstant(0, EVT::i32));
return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
CurDAG->getTargetConstant(0, MVT::i32));
} else {
unsigned Opc = ((Subtarget->isThumb() && Subtarget->hasThumb2()) ?
ARM::t2ADDri : ARM::ADDri);
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, EVT::i32),
getAL(CurDAG), CurDAG->getRegister(0, EVT::i32),
CurDAG->getRegister(0, EVT::i32) };
return CurDAG->SelectNodeTo(N, Opc, EVT::i32, Ops, 5);
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
return CurDAG->SelectNodeTo(N, Opc, MVT::i32, Ops, 5);
}
}
case ARMISD::DYN_ALLOC:
@ -1011,14 +1011,14 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
break;
SDValue V = Op.getOperand(0);
ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, EVT::i32);
SDValue Reg0 = CurDAG->getRegister(0, EVT::i32);
SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
if (Subtarget->isThumb()) {
SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, EVT::i32, Ops, 6);
return CurDAG->SelectNodeTo(N, ARM::t2ADDrs, MVT::i32, Ops, 6);
} else {
SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
return CurDAG->SelectNodeTo(N, ARM::ADDrs, EVT::i32, Ops, 7);
return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
}
}
if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
@ -1027,35 +1027,35 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
break;
SDValue V = Op.getOperand(0);
ShImm = ARM_AM::getSORegOpc(ARM_AM::lsl, ShImm);
SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, EVT::i32);
SDValue Reg0 = CurDAG->getRegister(0, EVT::i32);
SDValue ShImmOp = CurDAG->getTargetConstant(ShImm, MVT::i32);
SDValue Reg0 = CurDAG->getRegister(0, MVT::i32);
if (Subtarget->isThumb()) {
SDValue Ops[] = { V, V, ShImmOp, getAL(CurDAG), Reg0 };
return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, EVT::i32, Ops, 5);
return CurDAG->SelectNodeTo(N, ARM::t2RSBrs, MVT::i32, Ops, 5);
} else {
SDValue Ops[] = { V, V, Reg0, ShImmOp, getAL(CurDAG), Reg0, Reg0 };
return CurDAG->SelectNodeTo(N, ARM::RSBrs, EVT::i32, Ops, 7);
return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
}
}
}
break;
case ARMISD::FMRRD:
return CurDAG->getTargetNode(ARM::FMRRD, dl, EVT::i32, EVT::i32,
return CurDAG->getTargetNode(ARM::FMRRD, dl, MVT::i32, MVT::i32,
Op.getOperand(0), getAL(CurDAG),
CurDAG->getRegister(0, EVT::i32));
CurDAG->getRegister(0, MVT::i32));
case ISD::UMUL_LOHI: {
if (Subtarget->isThumb1Only())
break;
if (Subtarget->isThumb()) {
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, EVT::i32),
CurDAG->getRegister(0, EVT::i32) };
return CurDAG->getTargetNode(ARM::t2UMULL, dl, EVT::i32, EVT::i32, Ops,4);
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getTargetNode(ARM::t2UMULL, dl, MVT::i32, MVT::i32, Ops,4);
} else {
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, EVT::i32),
CurDAG->getRegister(0, EVT::i32) };
return CurDAG->getTargetNode(ARM::UMULL, dl, EVT::i32, EVT::i32, Ops, 5);
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getTargetNode(ARM::UMULL, dl, MVT::i32, MVT::i32, Ops, 5);
}
}
case ISD::SMUL_LOHI: {
@ -1063,13 +1063,13 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
break;
if (Subtarget->isThumb()) {
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, EVT::i32) };
return CurDAG->getTargetNode(ARM::t2SMULL, dl, EVT::i32, EVT::i32, Ops,4);
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getTargetNode(ARM::t2SMULL, dl, MVT::i32, MVT::i32, Ops,4);
} else {
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
getAL(CurDAG), CurDAG->getRegister(0, EVT::i32),
CurDAG->getRegister(0, EVT::i32) };
return CurDAG->getTargetNode(ARM::SMULL, dl, EVT::i32, EVT::i32, Ops, 5);
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
return CurDAG->getTargetNode(ARM::SMULL, dl, MVT::i32, MVT::i32, Ops, 5);
}
}
case ISD::LOAD: {
@ -1109,10 +1109,10 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
cast<ConstantSDNode>(N2)->getZExtValue()),
EVT::i32);
MVT::i32);
SDValue Ops[] = { N1, Tmp2, N3, Chain, InFlag };
SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, EVT::Other,
EVT::Flag, Ops, 5);
SDNode *ResNode = CurDAG->getTargetNode(Opc, dl, MVT::Other,
MVT::Flag, Ops, 5);
Chain = SDValue(ResNode, 0);
if (Op.getNode()->getNumValues() == 2) {
InFlag = SDValue(ResNode, 1);
@ -1131,7 +1131,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
assert(N2.getOpcode() == ISD::Constant);
assert(N3.getOpcode() == ISD::Register);
if (!Subtarget->isThumb1Only() && VT == EVT::i32) {
if (!Subtarget->isThumb1Only() && VT == MVT::i32) {
// Pattern: (ARMcmov:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
// Emits: (MOVCCs:i32 GPR:i32:$false, so_reg:i32:$true, (imm:i32):$cc)
// Pattern complexity = 18 cost = 1 size = 0
@ -1153,21 +1153,21 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
break;
}
SDValue SOShImm =
CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), EVT::i32);
CurDAG->getTargetConstant(ARM_AM::getSORegOffset(SOVal), MVT::i32);
SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
cast<ConstantSDNode>(N2)->getZExtValue()),
EVT::i32);
MVT::i32);
SDValue Ops[] = { N0, CPTmp0, SOShImm, Tmp2, N3, InFlag };
return CurDAG->SelectNodeTo(Op.getNode(), Opc, EVT::i32,Ops, 6);
return CurDAG->SelectNodeTo(Op.getNode(), Opc, MVT::i32,Ops, 6);
}
} else {
if (SelectShifterOperandReg(Op, N1, CPTmp0, CPTmp1, CPTmp2)) {
SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
cast<ConstantSDNode>(N2)->getZExtValue()),
EVT::i32);
MVT::i32);
SDValue Ops[] = { N0, CPTmp0, CPTmp1, CPTmp2, Tmp2, N3, InFlag };
return CurDAG->SelectNodeTo(Op.getNode(),
ARM::MOVCCs, EVT::i32, Ops, 7);
ARM::MOVCCs, MVT::i32, Ops, 7);
}
}
@ -1182,25 +1182,25 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
if (Predicate_t2_so_imm(N3.getNode())) {
SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
cast<ConstantSDNode>(N1)->getZExtValue()),
EVT::i32);
MVT::i32);
SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
cast<ConstantSDNode>(N2)->getZExtValue()),
EVT::i32);
MVT::i32);
SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
return CurDAG->SelectNodeTo(Op.getNode(),
ARM::t2MOVCCi, EVT::i32, Ops, 5);
ARM::t2MOVCCi, MVT::i32, Ops, 5);
}
} else {
if (Predicate_so_imm(N3.getNode())) {
SDValue Tmp1 = CurDAG->getTargetConstant(((unsigned)
cast<ConstantSDNode>(N1)->getZExtValue()),
EVT::i32);
MVT::i32);
SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
cast<ConstantSDNode>(N2)->getZExtValue()),
EVT::i32);
MVT::i32);
SDValue Ops[] = { N0, Tmp1, Tmp2, N3, InFlag };
return CurDAG->SelectNodeTo(Op.getNode(),
ARM::MOVCCi, EVT::i32, Ops, 5);
ARM::MOVCCi, MVT::i32, Ops, 5);
}
}
}
@ -1217,21 +1217,21 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
// Also FCPYScc and FCPYDcc.
SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
cast<ConstantSDNode>(N2)->getZExtValue()),
EVT::i32);
MVT::i32);
SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
unsigned Opc = 0;
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: assert(false && "Illegal conditional move type!");
break;
case EVT::i32:
case MVT::i32:
Opc = Subtarget->isThumb()
? (Subtarget->hasThumb2() ? ARM::t2MOVCCr : ARM::tMOVCCr)
: ARM::MOVCCr;
break;
case EVT::f32:
case MVT::f32:
Opc = ARM::FCPYScc;
break;
case EVT::f64:
case MVT::f64:
Opc = ARM::FCPYDcc;
break;
}
@ -1249,16 +1249,16 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
SDValue Tmp2 = CurDAG->getTargetConstant(((unsigned)
cast<ConstantSDNode>(N2)->getZExtValue()),
EVT::i32);
MVT::i32);
SDValue Ops[] = { N0, N1, Tmp2, N3, InFlag };
unsigned Opc = 0;
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: assert(false && "Illegal conditional move type!");
break;
case EVT::f32:
case MVT::f32:
Opc = ARM::FNEGScc;
break;
case EVT::f64:
case MVT::f64:
Opc = ARM::FNEGDcc;
break;
}
@ -1303,7 +1303,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
SDValue Ops[] = { Tmp1, Tmp2, Chain };
return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
EVT::Other, Ops, 3);
MVT::Other, Ops, 3);
}
case ISD::VECTOR_SHUFFLE: {
@ -1322,20 +1322,20 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
EVT HalfVT;
unsigned Opc = 0;
switch (VT.getVectorElementType().getSimpleVT()) {
switch (VT.getVectorElementType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("unhandled VDUP splat type");
case EVT::i8: Opc = ARM::VDUPLN8q; HalfVT = EVT::v8i8; break;
case EVT::i16: Opc = ARM::VDUPLN16q; HalfVT = EVT::v4i16; break;
case EVT::i32: Opc = ARM::VDUPLN32q; HalfVT = EVT::v2i32; break;
case EVT::f32: Opc = ARM::VDUPLNfq; HalfVT = EVT::v2f32; break;
case MVT::i8: Opc = ARM::VDUPLN8q; HalfVT = MVT::v8i8; break;
case MVT::i16: Opc = ARM::VDUPLN16q; HalfVT = MVT::v4i16; break;
case MVT::i32: Opc = ARM::VDUPLN32q; HalfVT = MVT::v2i32; break;
case MVT::f32: Opc = ARM::VDUPLNfq; HalfVT = MVT::v2f32; break;
}
// The source operand needs to be changed to a subreg of the original
// 128-bit operand, and the lane number needs to be adjusted accordingly.
unsigned NumElts = VT.getVectorNumElements() / 2;
unsigned SRVal = (LaneVal < NumElts ? arm_dsubreg_0 : arm_dsubreg_1);
SDValue SR = CurDAG->getTargetConstant(SRVal, EVT::i32);
SDValue NewLane = CurDAG->getTargetConstant(LaneVal % NumElts, EVT::i32);
SDValue SR = CurDAG->getTargetConstant(SRVal, MVT::i32);
SDValue NewLane = CurDAG->getTargetConstant(LaneVal % NumElts, MVT::i32);
SDNode *SubReg = CurDAG->getTargetNode(TargetInstrInfo::EXTRACT_SUBREG,
dl, HalfVT, N->getOperand(0), SR);
return CurDAG->SelectNodeTo(N, Opc, VT, SDValue(SubReg, 0), NewLane);
@ -1350,15 +1350,15 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
return NULL;
unsigned Opc = 0;
EVT VT = Op.getValueType();
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("unhandled VLD2D type");
case EVT::v8i8: Opc = ARM::VLD2d8; break;
case EVT::v4i16: Opc = ARM::VLD2d16; break;
case EVT::v2f32:
case EVT::v2i32: Opc = ARM::VLD2d32; break;
case MVT::v8i8: Opc = ARM::VLD2d8; break;
case MVT::v4i16: Opc = ARM::VLD2d16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VLD2d32; break;
}
const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc };
return CurDAG->getTargetNode(Opc, dl, VT, VT, EVT::Other, Ops, 3);
return CurDAG->getTargetNode(Opc, dl, VT, VT, MVT::Other, Ops, 3);
}
case ARMISD::VLD3D: {
@ -1367,15 +1367,15 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
return NULL;
unsigned Opc = 0;
EVT VT = Op.getValueType();
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("unhandled VLD3D type");
case EVT::v8i8: Opc = ARM::VLD3d8; break;
case EVT::v4i16: Opc = ARM::VLD3d16; break;
case EVT::v2f32:
case EVT::v2i32: Opc = ARM::VLD3d32; break;
case MVT::v8i8: Opc = ARM::VLD3d8; break;
case MVT::v4i16: Opc = ARM::VLD3d16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VLD3d32; break;
}
const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc };
return CurDAG->getTargetNode(Opc, dl, VT, VT, VT, EVT::Other, Ops, 3);
return CurDAG->getTargetNode(Opc, dl, VT, VT, VT, MVT::Other, Ops, 3);
}
case ARMISD::VLD4D: {
@ -1384,16 +1384,16 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
return NULL;
unsigned Opc = 0;
EVT VT = Op.getValueType();
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("unhandled VLD4D type");
case EVT::v8i8: Opc = ARM::VLD4d8; break;
case EVT::v4i16: Opc = ARM::VLD4d16; break;
case EVT::v2f32:
case EVT::v2i32: Opc = ARM::VLD4d32; break;
case MVT::v8i8: Opc = ARM::VLD4d8; break;
case MVT::v4i16: Opc = ARM::VLD4d16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VLD4d32; break;
}
const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc };
std::vector<EVT> ResTys(4, VT);
ResTys.push_back(EVT::Other);
ResTys.push_back(MVT::Other);
return CurDAG->getTargetNode(Opc, dl, ResTys, Ops, 3);
}
@ -1402,16 +1402,16 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
return NULL;
unsigned Opc = 0;
switch (N->getOperand(2).getValueType().getSimpleVT()) {
switch (N->getOperand(2).getValueType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("unhandled VST2D type");
case EVT::v8i8: Opc = ARM::VST2d8; break;
case EVT::v4i16: Opc = ARM::VST2d16; break;
case EVT::v2f32:
case EVT::v2i32: Opc = ARM::VST2d32; break;
case MVT::v8i8: Opc = ARM::VST2d8; break;
case MVT::v4i16: Opc = ARM::VST2d16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VST2d32; break;
}
const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
N->getOperand(2), N->getOperand(3) };
return CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 5);
return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 5);
}
case ARMISD::VST3D: {
@ -1419,17 +1419,17 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
return NULL;
unsigned Opc = 0;
switch (N->getOperand(2).getValueType().getSimpleVT()) {
switch (N->getOperand(2).getValueType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("unhandled VST3D type");
case EVT::v8i8: Opc = ARM::VST3d8; break;
case EVT::v4i16: Opc = ARM::VST3d16; break;
case EVT::v2f32:
case EVT::v2i32: Opc = ARM::VST3d32; break;
case MVT::v8i8: Opc = ARM::VST3d8; break;
case MVT::v4i16: Opc = ARM::VST3d16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VST3d32; break;
}
const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
N->getOperand(2), N->getOperand(3),
N->getOperand(4) };
return CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 6);
return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 6);
}
case ARMISD::VST4D: {
@ -1437,17 +1437,17 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
if (!SelectAddrMode6(Op, N->getOperand(1), MemAddr, MemUpdate, MemOpc))
return NULL;
unsigned Opc = 0;
switch (N->getOperand(2).getValueType().getSimpleVT()) {
switch (N->getOperand(2).getValueType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("unhandled VST4D type");
case EVT::v8i8: Opc = ARM::VST4d8; break;
case EVT::v4i16: Opc = ARM::VST4d16; break;
case EVT::v2f32:
case EVT::v2i32: Opc = ARM::VST4d32; break;
case MVT::v8i8: Opc = ARM::VST4d8; break;
case MVT::v4i16: Opc = ARM::VST4d16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VST4d32; break;
}
const SDValue Ops[] = { MemAddr, MemUpdate, MemOpc,
N->getOperand(2), N->getOperand(3),
N->getOperand(4), N->getOperand(5) };
return CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 7);
return CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 7);
}
case ISD::INTRINSIC_WO_CHAIN: {
@ -1460,46 +1460,46 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
default: break;
case Intrinsic::arm_neon_vtrn:
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: return NULL;
case EVT::v8i8: Opc = ARM::VTRNd8; break;
case EVT::v4i16: Opc = ARM::VTRNd16; break;
case EVT::v2f32:
case EVT::v2i32: Opc = ARM::VTRNd32; break;
case EVT::v16i8: Opc = ARM::VTRNq8; break;
case EVT::v8i16: Opc = ARM::VTRNq16; break;
case EVT::v4f32:
case EVT::v4i32: Opc = ARM::VTRNq32; break;
case MVT::v8i8: Opc = ARM::VTRNd8; break;
case MVT::v4i16: Opc = ARM::VTRNd16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VTRNd32; break;
case MVT::v16i8: Opc = ARM::VTRNq8; break;
case MVT::v8i16: Opc = ARM::VTRNq16; break;
case MVT::v4f32:
case MVT::v4i32: Opc = ARM::VTRNq32; break;
}
return CurDAG->getTargetNode(Opc, dl, VT, VT, N->getOperand(1),
N->getOperand(2));
case Intrinsic::arm_neon_vuzp:
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: return NULL;
case EVT::v8i8: Opc = ARM::VUZPd8; break;
case EVT::v4i16: Opc = ARM::VUZPd16; break;
case EVT::v2f32:
case EVT::v2i32: Opc = ARM::VUZPd32; break;
case EVT::v16i8: Opc = ARM::VUZPq8; break;
case EVT::v8i16: Opc = ARM::VUZPq16; break;
case EVT::v4f32:
case EVT::v4i32: Opc = ARM::VUZPq32; break;
case MVT::v8i8: Opc = ARM::VUZPd8; break;
case MVT::v4i16: Opc = ARM::VUZPd16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VUZPd32; break;
case MVT::v16i8: Opc = ARM::VUZPq8; break;
case MVT::v8i16: Opc = ARM::VUZPq16; break;
case MVT::v4f32:
case MVT::v4i32: Opc = ARM::VUZPq32; break;
}
return CurDAG->getTargetNode(Opc, dl, VT, VT, N->getOperand(1),
N->getOperand(2));
case Intrinsic::arm_neon_vzip:
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: return NULL;
case EVT::v8i8: Opc = ARM::VZIPd8; break;
case EVT::v4i16: Opc = ARM::VZIPd16; break;
case EVT::v2f32:
case EVT::v2i32: Opc = ARM::VZIPd32; break;
case EVT::v16i8: Opc = ARM::VZIPq8; break;
case EVT::v8i16: Opc = ARM::VZIPq16; break;
case EVT::v4f32:
case EVT::v4i32: Opc = ARM::VZIPq32; break;
case MVT::v8i8: Opc = ARM::VZIPd8; break;
case MVT::v4i16: Opc = ARM::VZIPd16; break;
case MVT::v2f32:
case MVT::v2i32: Opc = ARM::VZIPd32; break;
case MVT::v16i8: Opc = ARM::VZIPq8; break;
case MVT::v8i16: Opc = ARM::VZIPq16; break;
case MVT::v4f32:
case MVT::v4i32: Opc = ARM::VZIPq32; break;
}
return CurDAG->getTargetNode(Opc, dl, VT, VT, N->getOperand(1),
N->getOperand(2));

File diff suppressed because it is too large

View File

@ -129,13 +129,13 @@ class RegConstraint<string C> {
// so_imm_neg_XFORM - Return a so_imm value packed into the format described for
// so_imm_neg def below.
def so_imm_neg_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(-(int)N->getZExtValue(), EVT::i32);
return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
}]>;
// so_imm_not_XFORM - Return a so_imm value packed into the format described for
// so_imm_not def below.
def so_imm_not_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~(int)N->getZExtValue(), EVT::i32);
return CurDAG->getTargetConstant(~(int)N->getZExtValue(), MVT::i32);
}]>;
// rot_imm predicate - True if the 32-bit immediate is equal to 8, 16, or 24.
@ -254,12 +254,12 @@ def so_imm2part : Operand<i32>,
def so_imm2part_1 : SDNodeXForm<imm, [{
unsigned V = ARM_AM::getSOImmTwoPartFirst((unsigned)N->getZExtValue());
return CurDAG->getTargetConstant(V, EVT::i32);
return CurDAG->getTargetConstant(V, MVT::i32);
}]>;
def so_imm2part_2 : SDNodeXForm<imm, [{
unsigned V = ARM_AM::getSOImmTwoPartSecond((unsigned)N->getZExtValue());
return CurDAG->getTargetConstant(V, EVT::i32);
return CurDAG->getTargetConstant(V, MVT::i32);
}]>;
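A sketch, not part of the patch and using a hypothetical constant, of the kind of split the so_imm2part transforms above feed: each half must be an 8-bit value rotated right by an even amount.

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t Imm = 0x00FF00FFu;          // not representable as one so_imm
    uint32_t First  = Imm & 0x000000FFu; // 0xFF, rotation 0
    uint32_t Second = Imm & 0x00FF0000u; // 0xFF rotated right by 16
    assert((First | Second) == Imm);
    return 0;
  }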

View File

@ -277,33 +277,33 @@ def VST4d32 : VST4D<"vst4.32">;
// Extract D sub-registers of Q registers.
// (arm_dsubreg_0 is 5; arm_dsubreg_1 is 6)
def DSubReg_i8_reg : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(5 + N->getZExtValue() / 8, EVT::i32);
return CurDAG->getTargetConstant(5 + N->getZExtValue() / 8, MVT::i32);
}]>;
def DSubReg_i16_reg : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(5 + N->getZExtValue() / 4, EVT::i32);
return CurDAG->getTargetConstant(5 + N->getZExtValue() / 4, MVT::i32);
}]>;
def DSubReg_i32_reg : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(5 + N->getZExtValue() / 2, EVT::i32);
return CurDAG->getTargetConstant(5 + N->getZExtValue() / 2, MVT::i32);
}]>;
def DSubReg_f64_reg : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(5 + N->getZExtValue(), EVT::i32);
return CurDAG->getTargetConstant(5 + N->getZExtValue(), MVT::i32);
}]>;
// Extract S sub-registers of Q registers.
// (arm_ssubreg_0 is 1; arm_ssubreg_1 is 2; etc.)
def SSubReg_f32_reg : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(1 + N->getZExtValue(), EVT::i32);
return CurDAG->getTargetConstant(1 + N->getZExtValue(), MVT::i32);
}]>;
// Translate lane numbers from Q registers to D subregs.
def SubReg_i8_lane : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue() & 7, EVT::i32);
return CurDAG->getTargetConstant(N->getZExtValue() & 7, MVT::i32);
}]>;
def SubReg_i16_lane : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue() & 3, EVT::i32);
return CurDAG->getTargetConstant(N->getZExtValue() & 3, MVT::i32);
}]>;
def SubReg_i32_lane : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getZExtValue() & 1, EVT::i32);
return CurDAG->getTargetConstant(N->getZExtValue() & 1, MVT::i32);
}]>;
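An aside, not part of the patch, showing the arithmetic behind the subregister and lane transforms above for a hypothetical i8 lane 12 of a Q register.

  #include <cassert>

  int main() {
    unsigned Lane  = 12;            // lane of a v16i8 Q register (hypothetical)
    unsigned DReg  = 5 + Lane / 8;  // DSubReg_i8_reg: 6, i.e. arm_dsubreg_1
    unsigned DLane = Lane & 7;      // SubReg_i8_lane: lane 4 within that D reg
    assert(DReg == 6 && DLane == 4);
    return 0;
  }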
//===----------------------------------------------------------------------===//
@ -1772,7 +1772,7 @@ def VDUPfq : NVDup<0b11101010, 0b1011, 0b00, (outs QPR:$dst), (ins GPR:$src),
def SHUFFLE_get_splat_lane : SDNodeXForm<vector_shuffle, [{
ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
return CurDAG->getTargetConstant(SVOp->getSplatIndex(), EVT::i32);
return CurDAG->getTargetConstant(SVOp->getSplatIndex(), MVT::i32);
}]>;
def splat_lane : PatFrag<(ops node:$lhs, node:$rhs),

View File

@ -19,10 +19,10 @@ def ARMtcall : SDNode<"ARMISD::tCALL", SDT_ARMcall,
[SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
def imm_neg_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(-(int)N->getZExtValue(), EVT::i32);
return CurDAG->getTargetConstant(-(int)N->getZExtValue(), MVT::i32);
}]>;
def imm_comp_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), EVT::i32);
return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
}]>;
@ -58,12 +58,12 @@ def thumb_immshifted : PatLeaf<(imm), [{
def thumb_immshifted_val : SDNodeXForm<imm, [{
unsigned V = ARM_AM::getThumbImmNonShiftedVal((unsigned)N->getZExtValue());
return CurDAG->getTargetConstant(V, EVT::i32);
return CurDAG->getTargetConstant(V, MVT::i32);
}]>;
def thumb_immshifted_shamt : SDNodeXForm<imm, [{
unsigned V = ARM_AM::getThumbImmValShift((unsigned)N->getZExtValue());
return CurDAG->getTargetConstant(V, EVT::i32);
return CurDAG->getTargetConstant(V, MVT::i32);
}]>;
// Define Thumb specific addressing modes.

View File

@ -37,12 +37,12 @@ def t2_so_reg : Operand<i32>, // reg imm
// t2_so_imm_not_XFORM - Return the complement of a t2_so_imm value
def t2_so_imm_not_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), EVT::i32);
return CurDAG->getTargetConstant(~((uint32_t)N->getZExtValue()), MVT::i32);
}]>;
// t2_so_imm_neg_XFORM - Return the negation of a t2_so_imm value
def t2_so_imm_neg_XFORM : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(-((int)N->getZExtValue()), EVT::i32);
return CurDAG->getTargetConstant(-((int)N->getZExtValue()), MVT::i32);
}]>;
// t2_so_imm - Match a 32-bit immediate operand, which is an
@ -97,11 +97,11 @@ def imm0_65535 : PatLeaf<(i32 imm), [{
/// Split a 32-bit immediate into two 16 bit parts.
def t2_lo16 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() & 0xffff,
EVT::i32);
MVT::i32);
}]>;
def t2_hi16 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, EVT::i32);
return CurDAG->getTargetConstant((uint32_t)N->getZExtValue() >> 16, MVT::i32);
}]>;
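A sketch, not part of the patch, of the 16-bit split performed by t2_lo16 and t2_hi16 above, with a hypothetical immediate.

  #include <cassert>
  #include <cstdint>

  int main() {
    uint32_t Imm = 0x12345678u;     // hypothetical 32-bit immediate
    uint32_t Lo  = Imm & 0xffffu;   // t2_lo16
    uint32_t Hi  = Imm >> 16;       // t2_hi16
    assert(Lo == 0x5678u && Hi == 0x1234u);
    return 0;
  }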
def t2_lo16AllZero : PatLeaf<(i32 imm), [{

View File

@ -38,7 +38,7 @@ public:
/// Code Generation virtual methods...
const TargetRegisterClass *
getPhysicalRegisterRegClass(unsigned Reg, EVT VT = EVT::Other) const;
getPhysicalRegisterRegClass(unsigned Reg, EVT VT = MVT::Other) const;
bool requiresRegisterScavenging(const MachineFunction &MF) const;

View File

@ -152,7 +152,7 @@ namespace {
/// getI64Imm - Return a target constant with the specified value, of type
/// i64.
inline SDValue getI64Imm(int64_t Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i64);
return CurDAG->getTargetConstant(Imm, MVT::i64);
}
// Select - Convert the specified operand from a target-independent to a
@ -251,8 +251,8 @@ SDNode *AlphaDAGToDAGISel::Select(SDValue Op) {
case ISD::FrameIndex: {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
return CurDAG->SelectNodeTo(N, Alpha::LDA, EVT::i64,
CurDAG->getTargetFrameIndex(FI, EVT::i32),
return CurDAG->SelectNodeTo(N, Alpha::LDA, MVT::i64,
CurDAG->getTargetFrameIndex(FI, MVT::i32),
getI64Imm(0));
}
case ISD::GLOBAL_OFFSET_TABLE:
@ -272,16 +272,16 @@ SDNode *AlphaDAGToDAGISel::Select(SDValue Op) {
Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R27, N0,
Chain.getValue(1));
SDNode *CNode =
CurDAG->getTargetNode(Alpha::JSRs, dl, EVT::Other, EVT::Flag,
CurDAG->getTargetNode(Alpha::JSRs, dl, MVT::Other, MVT::Flag,
Chain, Chain.getValue(1));
Chain = CurDAG->getCopyFromReg(Chain, dl, Alpha::R27, EVT::i64,
Chain = CurDAG->getCopyFromReg(Chain, dl, Alpha::R27, MVT::i64,
SDValue(CNode, 1));
return CurDAG->SelectNodeTo(N, Alpha::BISr, EVT::i64, Chain, Chain);
return CurDAG->SelectNodeTo(N, Alpha::BISr, MVT::i64, Chain, Chain);
}
case ISD::READCYCLECOUNTER: {
SDValue Chain = N->getOperand(0);
return CurDAG->getTargetNode(Alpha::RPCC, dl, EVT::i64, EVT::Other,
return CurDAG->getTargetNode(Alpha::RPCC, dl, MVT::i64, MVT::Other,
Chain);
}
@ -290,7 +290,7 @@ SDNode *AlphaDAGToDAGISel::Select(SDValue Op) {
if (uval == 0) {
SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
Alpha::R31, EVT::i64);
Alpha::R31, MVT::i64);
ReplaceUses(Op, Result);
return NULL;
}
@ -306,17 +306,17 @@ SDNode *AlphaDAGToDAGISel::Select(SDValue Op) {
break; //(zext (LDAH (LDA)))
//Else use the constant pool
ConstantInt *C = ConstantInt::get(Type::Int64Ty, uval);
SDValue CPI = CurDAG->getTargetConstantPool(C, EVT::i64);
SDNode *Tmp = CurDAG->getTargetNode(Alpha::LDAHr, dl, EVT::i64, CPI,
SDValue CPI = CurDAG->getTargetConstantPool(C, MVT::i64);
SDNode *Tmp = CurDAG->getTargetNode(Alpha::LDAHr, dl, MVT::i64, CPI,
SDValue(getGlobalBaseReg(), 0));
return CurDAG->SelectNodeTo(N, Alpha::LDQr, EVT::i64, EVT::Other,
return CurDAG->SelectNodeTo(N, Alpha::LDQr, MVT::i64, MVT::Other,
CPI, SDValue(Tmp, 0), CurDAG->getEntryNode());
}
case ISD::TargetConstantFP:
case ISD::ConstantFP: {
ConstantFPSDNode *CN = cast<ConstantFPSDNode>(N);
bool isDouble = N->getValueType(0) == EVT::f64;
EVT T = isDouble ? EVT::f64 : EVT::f32;
bool isDouble = N->getValueType(0) == MVT::f64;
EVT T = isDouble ? MVT::f64 : MVT::f32;
if (CN->getValueAPF().isPosZero()) {
return CurDAG->SelectNodeTo(N, isDouble ? Alpha::CPYST : Alpha::CPYSS,
T, CurDAG->getRegister(Alpha::F31, T),
@ -359,18 +359,18 @@ SDNode *AlphaDAGToDAGISel::Select(SDValue Op) {
};
SDValue tmp1 = N->getOperand(rev?1:0);
SDValue tmp2 = N->getOperand(rev?0:1);
SDNode *cmp = CurDAG->getTargetNode(Opc, dl, EVT::f64, tmp1, tmp2);
SDNode *cmp = CurDAG->getTargetNode(Opc, dl, MVT::f64, tmp1, tmp2);
if (inv)
cmp = CurDAG->getTargetNode(Alpha::CMPTEQ, dl,
EVT::f64, SDValue(cmp, 0),
CurDAG->getRegister(Alpha::F31, EVT::f64));
MVT::f64, SDValue(cmp, 0),
CurDAG->getRegister(Alpha::F31, MVT::f64));
switch(CC) {
case ISD::SETUEQ: case ISD::SETULT: case ISD::SETULE:
case ISD::SETUNE: case ISD::SETUGT: case ISD::SETUGE:
{
SDNode* cmp2 = CurDAG->getTargetNode(Alpha::CMPTUN, dl, EVT::f64,
SDNode* cmp2 = CurDAG->getTargetNode(Alpha::CMPTUN, dl, MVT::f64,
tmp1, tmp2);
cmp = CurDAG->getTargetNode(Alpha::ADDT, dl, EVT::f64,
cmp = CurDAG->getTargetNode(Alpha::ADDT, dl, MVT::f64,
SDValue(cmp2, 0), SDValue(cmp, 0));
break;
}
@ -378,9 +378,9 @@ SDNode *AlphaDAGToDAGISel::Select(SDValue Op) {
}
SDNode* LD = CurDAG->getTargetNode(Alpha::FTOIT, dl,
EVT::i64, SDValue(cmp, 0));
return CurDAG->getTargetNode(Alpha::CMPULT, dl, EVT::i64,
CurDAG->getRegister(Alpha::R31, EVT::i64),
MVT::i64, SDValue(cmp, 0));
return CurDAG->getTargetNode(Alpha::CMPULT, dl, MVT::i64,
CurDAG->getRegister(Alpha::R31, MVT::i64),
SDValue(LD,0));
}
break;
@ -406,10 +406,10 @@ SDNode *AlphaDAGToDAGISel::Select(SDValue Op) {
if (get_zapImm(mask)) {
SDValue Z =
SDValue(CurDAG->getTargetNode(Alpha::ZAPNOTi, dl, EVT::i64,
SDValue(CurDAG->getTargetNode(Alpha::ZAPNOTi, dl, MVT::i64,
N->getOperand(0).getOperand(0),
getI64Imm(get_zapImm(mask))), 0);
return CurDAG->getTargetNode(Alpha::SRLr, dl, EVT::i64, Z,
return CurDAG->getTargetNode(Alpha::SRLr, dl, MVT::i64, Z,
getI64Imm(sval));
}
}
@ -434,14 +434,14 @@ void AlphaDAGToDAGISel::SelectCALL(SDValue Op) {
SDValue GOT = SDValue(getGlobalBaseReg(), 0);
Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R29, GOT, InFlag);
InFlag = Chain.getValue(1);
Chain = SDValue(CurDAG->getTargetNode(Alpha::BSR, dl, EVT::Other,
EVT::Flag, Addr.getOperand(0),
Chain = SDValue(CurDAG->getTargetNode(Alpha::BSR, dl, MVT::Other,
MVT::Flag, Addr.getOperand(0),
Chain, InFlag), 0);
} else {
Chain = CurDAG->getCopyToReg(Chain, dl, Alpha::R27, Addr, InFlag);
InFlag = Chain.getValue(1);
Chain = SDValue(CurDAG->getTargetNode(Alpha::JSR, dl, EVT::Other,
EVT::Flag, Chain, InFlag), 0);
Chain = SDValue(CurDAG->getTargetNode(Alpha::JSR, dl, MVT::Other,
MVT::Flag, Chain, InFlag), 0);
}
InFlag = Chain.getValue(1);

View File

@ -46,114 +46,114 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
// Set up the TargetLowering object.
//I am having problems with shr n i8 1
setShiftAmountType(EVT::i64);
setShiftAmountType(MVT::i64);
setBooleanContents(ZeroOrOneBooleanContent);
setUsesGlobalOffsetTable(true);
addRegisterClass(EVT::i64, Alpha::GPRCRegisterClass);
addRegisterClass(EVT::f64, Alpha::F8RCRegisterClass);
addRegisterClass(EVT::f32, Alpha::F4RCRegisterClass);
addRegisterClass(MVT::i64, Alpha::GPRCRegisterClass);
addRegisterClass(MVT::f64, Alpha::F8RCRegisterClass);
addRegisterClass(MVT::f32, Alpha::F4RCRegisterClass);
// We want to custom lower some of our intrinsics.
setOperationAction(ISD::INTRINSIC_WO_CHAIN, EVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setLoadExtAction(ISD::EXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::EXTLOAD, EVT::f32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, EVT::i32, Expand);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i32, Expand);
setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, EVT::i8, Expand);
setLoadExtAction(ISD::SEXTLOAD, EVT::i16, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
setTruncStoreAction(EVT::f64, EVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// setOperationAction(ISD::BRIND, EVT::Other, Expand);
setOperationAction(ISD::BR_JT, EVT::Other, Expand);
setOperationAction(ISD::BR_CC, EVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, EVT::Other, Expand);
// setOperationAction(ISD::BRIND, MVT::Other, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction(ISD::FREM, EVT::f32, Expand);
setOperationAction(ISD::FREM, EVT::f64, Expand);
setOperationAction(ISD::FREM, MVT::f32, Expand);
setOperationAction(ISD::FREM, MVT::f64, Expand);
setOperationAction(ISD::UINT_TO_FP, EVT::i64, Expand);
setOperationAction(ISD::SINT_TO_FP, EVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT, EVT::i64, Expand);
setOperationAction(ISD::FP_TO_SINT, EVT::i64, Custom);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
if (!TM.getSubtarget<AlphaSubtarget>().hasCT()) {
setOperationAction(ISD::CTPOP , EVT::i64 , Expand);
setOperationAction(ISD::CTTZ , EVT::i64 , Expand);
setOperationAction(ISD::CTLZ , EVT::i64 , Expand);
setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
setOperationAction(ISD::CTTZ , MVT::i64 , Expand);
setOperationAction(ISD::CTLZ , MVT::i64 , Expand);
}
setOperationAction(ISD::BSWAP , EVT::i64, Expand);
setOperationAction(ISD::ROTL , EVT::i64, Expand);
setOperationAction(ISD::ROTR , EVT::i64, Expand);
setOperationAction(ISD::BSWAP , MVT::i64, Expand);
setOperationAction(ISD::ROTL , MVT::i64, Expand);
setOperationAction(ISD::ROTR , MVT::i64, Expand);
setOperationAction(ISD::SREM , EVT::i64, Custom);
setOperationAction(ISD::UREM , EVT::i64, Custom);
setOperationAction(ISD::SDIV , EVT::i64, Custom);
setOperationAction(ISD::UDIV , EVT::i64, Custom);
setOperationAction(ISD::SREM , MVT::i64, Custom);
setOperationAction(ISD::UREM , MVT::i64, Custom);
setOperationAction(ISD::SDIV , MVT::i64, Custom);
setOperationAction(ISD::UDIV , MVT::i64, Custom);
setOperationAction(ISD::ADDC , EVT::i64, Expand);
setOperationAction(ISD::ADDE , EVT::i64, Expand);
setOperationAction(ISD::SUBC , EVT::i64, Expand);
setOperationAction(ISD::SUBE , EVT::i64, Expand);
setOperationAction(ISD::ADDC , MVT::i64, Expand);
setOperationAction(ISD::ADDE , MVT::i64, Expand);
setOperationAction(ISD::SUBC , MVT::i64, Expand);
setOperationAction(ISD::SUBE , MVT::i64, Expand);
setOperationAction(ISD::UMUL_LOHI, EVT::i64, Expand);
setOperationAction(ISD::SMUL_LOHI, EVT::i64, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
setOperationAction(ISD::SRL_PARTS, EVT::i64, Custom);
setOperationAction(ISD::SRA_PARTS, EVT::i64, Expand);
setOperationAction(ISD::SHL_PARTS, EVT::i64, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
// We don't support sin/cos/sqrt/pow
setOperationAction(ISD::FSIN , EVT::f64, Expand);
setOperationAction(ISD::FCOS , EVT::f64, Expand);
setOperationAction(ISD::FSIN , EVT::f32, Expand);
setOperationAction(ISD::FCOS , EVT::f32, Expand);
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FSQRT, EVT::f64, Expand);
setOperationAction(ISD::FSQRT, EVT::f32, Expand);
setOperationAction(ISD::FSQRT, MVT::f64, Expand);
setOperationAction(ISD::FSQRT, MVT::f32, Expand);
setOperationAction(ISD::FPOW , EVT::f32, Expand);
setOperationAction(ISD::FPOW , EVT::f64, Expand);
setOperationAction(ISD::FPOW , MVT::f32, Expand);
setOperationAction(ISD::FPOW , MVT::f64, Expand);
setOperationAction(ISD::SETCC, EVT::f32, Promote);
setOperationAction(ISD::SETCC, MVT::f32, Promote);
setOperationAction(ISD::BIT_CONVERT, EVT::f32, Promote);
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Promote);
// We don't have line number support yet.
setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
// Not implemented yet.
setOperationAction(ISD::STACKSAVE, EVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, EVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i64, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
// We want to legalize GlobalAddress and ConstantPool and
// ExternalSymbols nodes into the appropriate instructions to
// materialize the address.
setOperationAction(ISD::GlobalAddress, EVT::i64, Custom);
setOperationAction(ISD::ConstantPool, EVT::i64, Custom);
setOperationAction(ISD::ExternalSymbol, EVT::i64, Custom);
setOperationAction(ISD::GlobalTLSAddress, EVT::i64, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
setOperationAction(ISD::VASTART, EVT::Other, Custom);
setOperationAction(ISD::VAEND, EVT::Other, Expand);
setOperationAction(ISD::VACOPY, EVT::Other, Custom);
setOperationAction(ISD::VAARG, EVT::Other, Custom);
setOperationAction(ISD::VAARG, EVT::i32, Custom);
setOperationAction(ISD::VASTART, MVT::Other, Custom);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
setOperationAction(ISD::VACOPY, MVT::Other, Custom);
setOperationAction(ISD::VAARG, MVT::Other, Custom);
setOperationAction(ISD::VAARG, MVT::i32, Custom);
setOperationAction(ISD::JumpTable, EVT::i64, Custom);
setOperationAction(ISD::JumpTable, EVT::i32, Custom);
setOperationAction(ISD::JumpTable, MVT::i64, Custom);
setOperationAction(ISD::JumpTable, MVT::i32, Custom);
setStackPointerRegisterToSaveRestore(Alpha::R30);
@ -168,8 +168,8 @@ AlphaTargetLowering::AlphaTargetLowering(TargetMachine &TM)
computeRegisterProperties();
}
EVT::SimpleValueType AlphaTargetLowering::getSetCCResultType(EVT VT) const {
return EVT::i64;
MVT::SimpleValueType AlphaTargetLowering::getSetCCResultType(EVT VT) const {
return MVT::i64;
}
const char *AlphaTargetLowering::getTargetNodeName(unsigned Opcode) const {
@ -203,9 +203,9 @@ static SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
// FIXME there isn't really any debug info here
DebugLoc dl = Op.getDebugLoc();
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, EVT::i64, JTI,
DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, EVT::i64, JTI, Hi);
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, JTI,
DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, MVT::i64, JTI, Hi);
return Lo;
}
@ -285,7 +285,7 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
assert(VA.isMemLoc());
if (StackPtr.getNode() == 0)
StackPtr = DAG.getCopyFromReg(Chain, dl, Alpha::R30, EVT::i64);
StackPtr = DAG.getCopyFromReg(Chain, dl, Alpha::R30, MVT::i64);
SDValue PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(),
StackPtr,
@ -299,7 +299,7 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because all store nodes are
// independent of each other.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token chain and
@ -313,7 +313,7 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
}
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@ -410,23 +410,23 @@ AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
SDValue ArgVal;
if (ArgNo < 6) {
switch (ObjectVT.getSimpleVT()) {
switch (ObjectVT.getSimpleVT().SimpleTy) {
default:
assert(false && "Invalid value type!");
case EVT::f64:
case MVT::f64:
args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo],
&Alpha::F8RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, args_float[ArgNo], ObjectVT);
break;
case EVT::f32:
case MVT::f32:
args_float[ArgNo] = AddLiveIn(MF, args_float[ArgNo],
&Alpha::F4RCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, args_float[ArgNo], ObjectVT);
break;
case EVT::i64:
case MVT::i64:
args_int[ArgNo] = AddLiveIn(MF, args_int[ArgNo],
&Alpha::GPRCRegClass);
ArgVal = DAG.getCopyFromReg(Chain, dl, args_int[ArgNo], EVT::i64);
ArgVal = DAG.getCopyFromReg(Chain, dl, args_int[ArgNo], MVT::i64);
break;
}
} else { //more args
@ -435,7 +435,7 @@ AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
// Create the SelectionDAG nodes corresponding to a load
//from this parameter
SDValue FIN = DAG.getFrameIndex(FI, EVT::i64);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i64);
ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, NULL, 0);
}
InVals.push_back(ArgVal);
@ -448,22 +448,22 @@ AlphaTargetLowering::LowerFormalArguments(SDValue Chain,
for (int i = 0; i < 6; ++i) {
if (TargetRegisterInfo::isPhysicalRegister(args_int[i]))
args_int[i] = AddLiveIn(MF, args_int[i], &Alpha::GPRCRegClass);
SDValue argt = DAG.getCopyFromReg(Chain, dl, args_int[i], EVT::i64);
SDValue argt = DAG.getCopyFromReg(Chain, dl, args_int[i], MVT::i64);
int FI = MFI->CreateFixedObject(8, -8 * (6 - i));
if (i == 0) VarArgsBase = FI;
SDValue SDFI = DAG.getFrameIndex(FI, EVT::i64);
SDValue SDFI = DAG.getFrameIndex(FI, MVT::i64);
LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0));
if (TargetRegisterInfo::isPhysicalRegister(args_float[i]))
args_float[i] = AddLiveIn(MF, args_float[i], &Alpha::F8RCRegClass);
argt = DAG.getCopyFromReg(Chain, dl, args_float[i], EVT::f64);
argt = DAG.getCopyFromReg(Chain, dl, args_float[i], MVT::f64);
FI = MFI->CreateFixedObject(8, - 8 * (12 - i));
SDFI = DAG.getFrameIndex(FI, EVT::i64);
SDFI = DAG.getFrameIndex(FI, MVT::i64);
LS.push_back(DAG.getStore(Chain, dl, argt, SDFI, NULL, 0));
}
//Set up a token factor with all the stack traffic
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, &LS[0], LS.size());
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &LS[0], LS.size());
}
return Chain;
@ -478,7 +478,7 @@ AlphaTargetLowering::LowerReturn(SDValue Chain,
SDValue Copy = DAG.getCopyToReg(Chain, dl, Alpha::R26,
DAG.getNode(AlphaISD::GlobalRetAddr,
DebugLoc::getUnknownLoc(),
EVT::i64),
MVT::i64),
SDValue());
switch (Outs.size()) {
default:
@ -528,7 +528,7 @@ AlphaTargetLowering::LowerReturn(SDValue Chain,
}
}
return DAG.getNode(AlphaISD::RET_FLAG, dl,
EVT::Other, Copy, Copy.getValue(1));
MVT::Other, Copy, Copy.getValue(1));
}
void AlphaTargetLowering::LowerVAARG(SDNode *N, SDValue &Chain,
@ -538,26 +538,26 @@ void AlphaTargetLowering::LowerVAARG(SDNode *N, SDValue &Chain,
const Value *VAListS = cast<SrcValueSDNode>(N->getOperand(2))->getValue();
DebugLoc dl = N->getDebugLoc();
SDValue Base = DAG.getLoad(EVT::i64, dl, Chain, VAListP, VAListS, 0);
SDValue Tmp = DAG.getNode(ISD::ADD, dl, EVT::i64, VAListP,
DAG.getConstant(8, EVT::i64));
SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, dl, EVT::i64, Base.getValue(1),
Tmp, NULL, 0, EVT::i32);
DataPtr = DAG.getNode(ISD::ADD, dl, EVT::i64, Base, Offset);
SDValue Base = DAG.getLoad(MVT::i64, dl, Chain, VAListP, VAListS, 0);
SDValue Tmp = DAG.getNode(ISD::ADD, dl, MVT::i64, VAListP,
DAG.getConstant(8, MVT::i64));
SDValue Offset = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Base.getValue(1),
Tmp, NULL, 0, MVT::i32);
DataPtr = DAG.getNode(ISD::ADD, dl, MVT::i64, Base, Offset);
if (N->getValueType(0).isFloatingPoint())
{
//if fp && Offset < 6*8, then subtract 6*8 from DataPtr
SDValue FPDataPtr = DAG.getNode(ISD::SUB, dl, EVT::i64, DataPtr,
DAG.getConstant(8*6, EVT::i64));
SDValue CC = DAG.getSetCC(dl, EVT::i64, Offset,
DAG.getConstant(8*6, EVT::i64), ISD::SETLT);
DataPtr = DAG.getNode(ISD::SELECT, dl, EVT::i64, CC, FPDataPtr, DataPtr);
SDValue FPDataPtr = DAG.getNode(ISD::SUB, dl, MVT::i64, DataPtr,
DAG.getConstant(8*6, MVT::i64));
SDValue CC = DAG.getSetCC(dl, MVT::i64, Offset,
DAG.getConstant(8*6, MVT::i64), ISD::SETLT);
DataPtr = DAG.getNode(ISD::SELECT, dl, MVT::i64, CC, FPDataPtr, DataPtr);
}
SDValue NewOffset = DAG.getNode(ISD::ADD, dl, EVT::i64, Offset,
DAG.getConstant(8, EVT::i64));
SDValue NewOffset = DAG.getNode(ISD::ADD, dl, MVT::i64, Offset,
DAG.getConstant(8, MVT::i64));
Chain = DAG.getTruncStore(Offset.getValue(1), dl, NewOffset, Tmp, NULL, 0,
EVT::i32);
MVT::i32);
}
/// LowerOperation - Provide custom lowering hooks for some operations.
@ -573,7 +573,7 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
switch (IntNo) {
default: break; // Don't custom lower most intrinsics.
case Intrinsic::alpha_umulh:
return DAG.getNode(ISD::MULHU, dl, EVT::i64,
return DAG.getNode(ISD::MULHU, dl, MVT::i64,
Op.getOperand(1), Op.getOperand(2));
}
}
@ -582,23 +582,23 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
SDValue ShOpLo = Op.getOperand(0);
SDValue ShOpHi = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
SDValue bm = DAG.getNode(ISD::SUB, dl, EVT::i64,
DAG.getConstant(64, EVT::i64), ShAmt);
SDValue BMCC = DAG.getSetCC(dl, EVT::i64, bm,
DAG.getConstant(0, EVT::i64), ISD::SETLE);
SDValue bm = DAG.getNode(ISD::SUB, dl, MVT::i64,
DAG.getConstant(64, MVT::i64), ShAmt);
SDValue BMCC = DAG.getSetCC(dl, MVT::i64, bm,
DAG.getConstant(0, MVT::i64), ISD::SETLE);
// if 64 - shAmt <= 0
SDValue Hi_Neg = DAG.getConstant(0, EVT::i64);
SDValue ShAmt_Neg = DAG.getNode(ISD::SUB, dl, EVT::i64,
DAG.getConstant(0, EVT::i64), bm);
SDValue Lo_Neg = DAG.getNode(ISD::SRL, dl, EVT::i64, ShOpHi, ShAmt_Neg);
SDValue Hi_Neg = DAG.getConstant(0, MVT::i64);
SDValue ShAmt_Neg = DAG.getNode(ISD::SUB, dl, MVT::i64,
DAG.getConstant(0, MVT::i64), bm);
SDValue Lo_Neg = DAG.getNode(ISD::SRL, dl, MVT::i64, ShOpHi, ShAmt_Neg);
// else
SDValue carries = DAG.getNode(ISD::SHL, dl, EVT::i64, ShOpHi, bm);
SDValue Hi_Pos = DAG.getNode(ISD::SRL, dl, EVT::i64, ShOpHi, ShAmt);
SDValue Lo_Pos = DAG.getNode(ISD::SRL, dl, EVT::i64, ShOpLo, ShAmt);
Lo_Pos = DAG.getNode(ISD::OR, dl, EVT::i64, Lo_Pos, carries);
SDValue carries = DAG.getNode(ISD::SHL, dl, MVT::i64, ShOpHi, bm);
SDValue Hi_Pos = DAG.getNode(ISD::SRL, dl, MVT::i64, ShOpHi, ShAmt);
SDValue Lo_Pos = DAG.getNode(ISD::SRL, dl, MVT::i64, ShOpLo, ShAmt);
Lo_Pos = DAG.getNode(ISD::OR, dl, MVT::i64, Lo_Pos, carries);
// Merge
SDValue Hi = DAG.getNode(ISD::SELECT, dl, EVT::i64, BMCC, Hi_Neg, Hi_Pos);
SDValue Lo = DAG.getNode(ISD::SELECT, dl, EVT::i64, BMCC, Lo_Neg, Lo_Pos);
SDValue Hi = DAG.getNode(ISD::SELECT, dl, MVT::i64, BMCC, Hi_Neg, Hi_Pos);
SDValue Lo = DAG.getNode(ISD::SELECT, dl, MVT::i64, BMCC, Lo_Neg, Lo_Pos);
SDValue Ops[2] = { Lo, Hi };
return DAG.getMergeValues(Ops, 2, dl);
}
@ -608,35 +608,35 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
case ISD::SINT_TO_FP: {
assert(Op.getOperand(0).getValueType() == EVT::i64 &&
assert(Op.getOperand(0).getValueType() == MVT::i64 &&
"Unhandled SINT_TO_FP type in custom expander!");
SDValue LD;
bool isDouble = Op.getValueType() == EVT::f64;
LD = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f64, Op.getOperand(0));
bool isDouble = Op.getValueType() == MVT::f64;
LD = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, Op.getOperand(0));
SDValue FP = DAG.getNode(isDouble?AlphaISD::CVTQT_:AlphaISD::CVTQS_, dl,
isDouble?EVT::f64:EVT::f32, LD);
isDouble?MVT::f64:MVT::f32, LD);
return FP;
}
case ISD::FP_TO_SINT: {
bool isDouble = Op.getOperand(0).getValueType() == EVT::f64;
bool isDouble = Op.getOperand(0).getValueType() == MVT::f64;
SDValue src = Op.getOperand(0);
if (!isDouble) //Promote
src = DAG.getNode(ISD::FP_EXTEND, dl, EVT::f64, src);
src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, src);
src = DAG.getNode(AlphaISD::CVTTQ_, dl, EVT::f64, src);
src = DAG.getNode(AlphaISD::CVTTQ_, dl, MVT::f64, src);
return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i64, src);
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, src);
}
case ISD::ConstantPool: {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
Constant *C = CP->getConstVal();
SDValue CPI = DAG.getTargetConstantPool(C, EVT::i64, CP->getAlignment());
SDValue CPI = DAG.getTargetConstantPool(C, MVT::i64, CP->getAlignment());
// FIXME there isn't really any debug info here
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, EVT::i64, CPI,
DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, EVT::i64, CPI, Hi);
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, CPI,
DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, MVT::i64, CPI, Hi);
return Lo;
}
case ISD::GlobalTLSAddress:
@ -644,24 +644,24 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
case ISD::GlobalAddress: {
GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
GlobalValue *GV = GSDN->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i64, GSDN->getOffset());
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i64, GSDN->getOffset());
// FIXME there isn't really any debug info here
// if (!GV->hasWeakLinkage() && !GV->isDeclaration() && !GV->hasLinkOnceLinkage()) {
if (GV->hasLocalLinkage()) {
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, EVT::i64, GA,
DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, EVT::i64, GA, Hi);
SDValue Hi = DAG.getNode(AlphaISD::GPRelHi, dl, MVT::i64, GA,
DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
SDValue Lo = DAG.getNode(AlphaISD::GPRelLo, dl, MVT::i64, GA, Hi);
return Lo;
} else
return DAG.getNode(AlphaISD::RelLit, dl, EVT::i64, GA,
DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
return DAG.getNode(AlphaISD::RelLit, dl, MVT::i64, GA,
DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
}
case ISD::ExternalSymbol: {
return DAG.getNode(AlphaISD::RelLit, dl, EVT::i64,
return DAG.getNode(AlphaISD::RelLit, dl, MVT::i64,
DAG.getTargetExternalSymbol(cast<ExternalSymbolSDNode>(Op)
->getSymbol(), EVT::i64),
DAG.getGLOBAL_OFFSET_TABLE(EVT::i64));
->getSymbol(), MVT::i64),
DAG.getGLOBAL_OFFSET_TABLE(MVT::i64));
}
case ISD::UREM:
@ -692,8 +692,8 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
}
SDValue Tmp1 = Op.getOperand(0),
Tmp2 = Op.getOperand(1),
Addr = DAG.getExternalSymbol(opstr, EVT::i64);
return DAG.getNode(AlphaISD::DivCall, dl, EVT::i64, Addr, Tmp1, Tmp2);
Addr = DAG.getExternalSymbol(opstr, MVT::i64);
return DAG.getNode(AlphaISD::DivCall, dl, MVT::i64, Addr, Tmp1, Tmp2);
}
break;
@ -702,9 +702,9 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
LowerVAARG(Op.getNode(), Chain, DataPtr, DAG);
SDValue Result;
if (Op.getValueType() == EVT::i32)
Result = DAG.getExtLoad(ISD::SEXTLOAD, dl, EVT::i64, Chain, DataPtr,
NULL, 0, EVT::i32);
if (Op.getValueType() == MVT::i32)
Result = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Chain, DataPtr,
NULL, 0, MVT::i32);
else
Result = DAG.getLoad(Op.getValueType(), dl, Chain, DataPtr, NULL, 0);
return Result;
@ -718,13 +718,13 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
SDValue Val = DAG.getLoad(getPointerTy(), dl, Chain, SrcP, SrcS, 0);
SDValue Result = DAG.getStore(Val.getValue(1), dl, Val, DestP, DestS, 0);
SDValue NP = DAG.getNode(ISD::ADD, dl, EVT::i64, SrcP,
DAG.getConstant(8, EVT::i64));
Val = DAG.getExtLoad(ISD::SEXTLOAD, dl, EVT::i64, Result,
NP, NULL,0, EVT::i32);
SDValue NPD = DAG.getNode(ISD::ADD, dl, EVT::i64, DestP,
DAG.getConstant(8, EVT::i64));
return DAG.getTruncStore(Val.getValue(1), dl, Val, NPD, NULL, 0, EVT::i32);
SDValue NP = DAG.getNode(ISD::ADD, dl, MVT::i64, SrcP,
DAG.getConstant(8, MVT::i64));
Val = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i64, Result,
NP, NULL,0, MVT::i32);
SDValue NPD = DAG.getNode(ISD::ADD, dl, MVT::i64, DestP,
DAG.getConstant(8, MVT::i64));
return DAG.getTruncStore(Val.getValue(1), dl, Val, NPD, NULL, 0, MVT::i32);
}
case ISD::VASTART: {
SDValue Chain = Op.getOperand(0);
@ -732,16 +732,16 @@ SDValue AlphaTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
const Value *VAListS = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
// vastart stores the address of the VarArgsBase and VarArgsOffset
SDValue FR = DAG.getFrameIndex(VarArgsBase, EVT::i64);
SDValue FR = DAG.getFrameIndex(VarArgsBase, MVT::i64);
SDValue S1 = DAG.getStore(Chain, dl, FR, VAListP, VAListS, 0);
SDValue SA2 = DAG.getNode(ISD::ADD, dl, EVT::i64, VAListP,
DAG.getConstant(8, EVT::i64));
return DAG.getTruncStore(S1, dl, DAG.getConstant(VarArgsOffset, EVT::i64),
SA2, NULL, 0, EVT::i32);
SDValue SA2 = DAG.getNode(ISD::ADD, dl, MVT::i64, VAListP,
DAG.getConstant(8, MVT::i64));
return DAG.getTruncStore(S1, dl, DAG.getConstant(VarArgsOffset, MVT::i64),
SA2, NULL, 0, MVT::i32);
}
case ISD::RETURNADDR:
return DAG.getNode(AlphaISD::GlobalRetAddr, DebugLoc::getUnknownLoc(),
EVT::i64);
MVT::i64);
//FIXME: implement
case ISD::FRAMEADDR: break;
}
@ -753,7 +753,7 @@ void AlphaTargetLowering::ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue>&Results,
SelectionDAG &DAG) {
DebugLoc dl = N->getDebugLoc();
assert(N->getValueType(0) == EVT::i32 &&
assert(N->getValueType(0) == MVT::i32 &&
N->getOpcode() == ISD::VAARG &&
"Unknown node to custom promote!");

View File

@ -67,7 +67,7 @@ namespace llvm {
explicit AlphaTargetLowering(TargetMachine &TM);
/// getSetCCResultType - Get the SETCC result ValueType
virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
/// LowerOperation - Provide custom lowering hooks for some operations.
///

View File

@ -94,9 +94,9 @@ SDNode *BlackfinDAGToDAGISel::Select(SDValue Op) {
// Selects to ADDpp FI, 0 which in turn will become ADDimm7 SP, imm or ADDpp
// SP, Px
int FI = cast<FrameIndexSDNode>(N)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, EVT::i32);
return CurDAG->SelectNodeTo(N, BF::ADDpp, EVT::i32, TFI,
CurDAG->getTargetConstant(0, EVT::i32));
SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i32);
return CurDAG->SelectNodeTo(N, BF::ADDpp, MVT::i32, TFI,
CurDAG->getTargetConstant(0, MVT::i32));
}
}
@ -109,8 +109,8 @@ bool BlackfinDAGToDAGISel::SelectADDRspii(SDValue Op,
SDValue &Offset) {
FrameIndexSDNode *FIN = 0;
if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
Offset = CurDAG->getTargetConstant(0, EVT::i32);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
if (Addr.getOpcode() == ISD::ADD) {
@ -119,8 +119,8 @@ bool BlackfinDAGToDAGISel::SelectADDRspii(SDValue Op,
(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) &&
(CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) {
// Constant positive word offset from frame index
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), EVT::i32);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
return true;
}
}
@ -179,9 +179,9 @@ void BlackfinDAGToDAGISel::FixRegisterClasses(SelectionDAG &DAG) {
SDNode *Copy =
DAG.getTargetNode(TargetInstrInfo::COPY_TO_REGCLASS,
NI->getDebugLoc(),
EVT::i32,
MVT::i32,
UI.getUse().get(),
DAG.getTargetConstant(BF::DRegClassID, EVT::i32));
DAG.getTargetConstant(BF::DRegClassID, MVT::i32));
UpdateNodeOperand(DAG, *UI, UI.getOperandNo(), SDValue(Copy, 0));
}
}

View File

@ -40,92 +40,92 @@ using namespace llvm;
BlackfinTargetLowering::BlackfinTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
setShiftAmountType(EVT::i16);
setShiftAmountType(MVT::i16);
setBooleanContents(ZeroOrOneBooleanContent);
setStackPointerRegisterToSaveRestore(BF::SP);
setIntDivIsCheap(false);
// Set up the legal register classes.
addRegisterClass(EVT::i32, BF::DRegisterClass);
addRegisterClass(EVT::i16, BF::D16RegisterClass);
addRegisterClass(MVT::i32, BF::DRegisterClass);
addRegisterClass(MVT::i16, BF::D16RegisterClass);
computeRegisterProperties();
// Blackfin doesn't have i1 loads or stores
setLoadExtAction(ISD::EXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
setOperationAction(ISD::GlobalAddress, EVT::i32, Custom);
setOperationAction(ISD::JumpTable, EVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::JumpTable, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, EVT::Other, Expand);
setOperationAction(ISD::BR_JT, EVT::Other, Expand);
setOperationAction(ISD::BR_CC, EVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
// i16 registers don't do much
setOperationAction(ISD::AND, EVT::i16, Promote);
setOperationAction(ISD::OR, EVT::i16, Promote);
setOperationAction(ISD::XOR, EVT::i16, Promote);
setOperationAction(ISD::CTPOP, EVT::i16, Promote);
setOperationAction(ISD::AND, MVT::i16, Promote);
setOperationAction(ISD::OR, MVT::i16, Promote);
setOperationAction(ISD::XOR, MVT::i16, Promote);
setOperationAction(ISD::CTPOP, MVT::i16, Promote);
// The expansion of CTLZ/CTTZ uses AND/OR, so we might as well promote
// immediately.
setOperationAction(ISD::CTLZ, EVT::i16, Promote);
setOperationAction(ISD::CTTZ, EVT::i16, Promote);
setOperationAction(ISD::SETCC, EVT::i16, Promote);
setOperationAction(ISD::CTLZ, MVT::i16, Promote);
setOperationAction(ISD::CTTZ, MVT::i16, Promote);
setOperationAction(ISD::SETCC, MVT::i16, Promote);
// Blackfin has no division
setOperationAction(ISD::SDIV, EVT::i16, Expand);
setOperationAction(ISD::SDIV, EVT::i32, Expand);
setOperationAction(ISD::SDIVREM, EVT::i16, Expand);
setOperationAction(ISD::SDIVREM, EVT::i32, Expand);
setOperationAction(ISD::SREM, EVT::i16, Expand);
setOperationAction(ISD::SREM, EVT::i32, Expand);
setOperationAction(ISD::UDIV, EVT::i16, Expand);
setOperationAction(ISD::UDIV, EVT::i32, Expand);
setOperationAction(ISD::UDIVREM, EVT::i16, Expand);
setOperationAction(ISD::UDIVREM, EVT::i32, Expand);
setOperationAction(ISD::UREM, EVT::i16, Expand);
setOperationAction(ISD::UREM, EVT::i32, Expand);
setOperationAction(ISD::SDIV, MVT::i16, Expand);
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
setOperationAction(ISD::SREM, MVT::i16, Expand);
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::UDIV, MVT::i16, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i16, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, EVT::i32, Expand);
setOperationAction(ISD::UMUL_LOHI, EVT::i32, Expand);
setOperationAction(ISD::MULHU, EVT::i32, Expand);
setOperationAction(ISD::MULHS, EVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
setOperationAction(ISD::MULHU, MVT::i32, Expand);
setOperationAction(ISD::MULHS, MVT::i32, Expand);
// No carry-in operations.
setOperationAction(ISD::ADDE, EVT::i32, Custom);
setOperationAction(ISD::SUBE, EVT::i32, Custom);
setOperationAction(ISD::ADDE, MVT::i32, Custom);
setOperationAction(ISD::SUBE, MVT::i32, Custom);
// Blackfin has no intrinsics for these particular operations.
setOperationAction(ISD::MEMBARRIER, EVT::Other, Expand);
setOperationAction(ISD::BSWAP, EVT::i32, Expand);
setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
setOperationAction(ISD::SHL_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
// i32 has native CTPOP, but not CTLZ/CTTZ
setOperationAction(ISD::CTLZ, EVT::i32, Expand);
setOperationAction(ISD::CTTZ, EVT::i32, Expand);
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
// READCYCLECOUNTER needs special type legalization.
setOperationAction(ISD::READCYCLECOUNTER, EVT::i64, Custom);
setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
// We don't have line number support yet.
setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
setOperationAction(ISD::DECLARE, EVT::Other, Expand);
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
setOperationAction(ISD::DECLARE, MVT::Other, Expand);
// Use the default implementation.
setOperationAction(ISD::VACOPY, EVT::Other, Expand);
setOperationAction(ISD::VAEND, EVT::Other, Expand);
setOperationAction(ISD::STACKSAVE, EVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, EVT::Other, Expand);
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
}
const char *BlackfinTargetLowering::getTargetNodeName(unsigned Opcode) const {
@ -137,10 +137,10 @@ const char *BlackfinTargetLowering::getTargetNodeName(unsigned Opcode) const {
}
}
EVT::SimpleValueType BlackfinTargetLowering::getSetCCResultType(EVT VT) const {
MVT::SimpleValueType BlackfinTargetLowering::getSetCCResultType(EVT VT) const {
// SETCC always sets the CC register. Technically that is an i1 register, but
// that type is not legal, so we treat it as an i32 register.
return EVT::i32;
return MVT::i32;
}
SDValue BlackfinTargetLowering::LowerGlobalAddress(SDValue Op,
@ -148,16 +148,16 @@ SDValue BlackfinTargetLowering::LowerGlobalAddress(SDValue Op,
DebugLoc DL = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
Op = DAG.getTargetGlobalAddress(GV, EVT::i32);
return DAG.getNode(BFISD::Wrapper, DL, EVT::i32, Op);
Op = DAG.getTargetGlobalAddress(GV, MVT::i32);
return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
}
SDValue BlackfinTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
DebugLoc DL = Op.getDebugLoc();
int JTI = cast<JumpTableSDNode>(Op)->getIndex();
Op = DAG.getTargetJumpTable(JTI, EVT::i32);
return DAG.getNode(BFISD::Wrapper, DL, EVT::i32, Op);
Op = DAG.getTargetJumpTable(JTI, MVT::i32);
return DAG.getNode(BFISD::Wrapper, DL, MVT::i32, Op);
}
SDValue
@ -209,7 +209,7 @@ BlackfinTargetLowering::LowerFormalArguments(SDValue Chain,
assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
unsigned ObjSize = VA.getLocVT().getStoreSizeInBits()/8;
int FI = MFI->CreateFixedObject(ObjSize, VA.getLocMemOffset());
SDValue FIN = DAG.getFrameIndex(FI, EVT::i32);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN, NULL, 0));
}
}
@ -268,9 +268,9 @@ BlackfinTargetLowering::LowerReturn(SDValue Chain,
}
if (Flag.getNode()) {
return DAG.getNode(BFISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
} else {
return DAG.getNode(BFISD::RET_FLAG, dl, EVT::Other, Chain);
return DAG.getNode(BFISD::RET_FLAG, dl, MVT::Other, Chain);
}
}
@ -325,10 +325,10 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
assert(VA.isMemLoc() && "CCValAssign must be RegLoc or MemLoc");
int Offset = VA.getLocMemOffset();
assert(Offset%4 == 0 && "Unaligned LocMemOffset");
assert(VA.getLocVT()==EVT::i32 && "Illegal CCValAssign type");
SDValue SPN = DAG.getCopyFromReg(Chain, dl, BF::SP, EVT::i32);
assert(VA.getLocVT()==MVT::i32 && "Illegal CCValAssign type");
SDValue SPN = DAG.getCopyFromReg(Chain, dl, BF::SP, MVT::i32);
SDValue OffsetN = DAG.getIntPtrConstant(Offset);
OffsetN = DAG.getNode(ISD::ADD, dl, EVT::i32, SPN, OffsetN);
OffsetN = DAG.getNode(ISD::ADD, dl, MVT::i32, SPN, OffsetN);
MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, OffsetN,
PseudoSourceValue::getStack(),
Offset));
@ -338,7 +338,7 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because
// all store nodes are independent of each other.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token
@ -356,13 +356,13 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i32);
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), EVT::i32);
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
std::vector<EVT> NodeTys;
NodeTys.push_back(EVT::Other); // Returns a chain
NodeTys.push_back(EVT::Flag); // Returns a flag for retval copy to use.
NodeTys.push_back(MVT::Other); // Returns a chain
NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
SDValue Ops[] = { Chain, Callee, InFlag };
Chain = DAG.getNode(BFISD::CALL, dl, NodeTys, Ops,
InFlag.getNode() ? 3 : 2);
@ -423,25 +423,25 @@ SDValue BlackfinTargetLowering::LowerADDE(SDValue Op, SelectionDAG &DAG) {
unsigned Opcode = Op.getOpcode()==ISD::ADDE ? BF::ADD : BF::SUB;
// zext incoming carry flag in AC0 to 32 bits
SDNode* CarryIn = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, EVT::i32,
SDNode* CarryIn = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, MVT::i32,
/* flag= */ Op.getOperand(2));
CarryIn = DAG.getTargetNode(BF::MOVECC_zext, dl, EVT::i32,
CarryIn = DAG.getTargetNode(BF::MOVECC_zext, dl, MVT::i32,
SDValue(CarryIn, 0));
// Add operands, produce sum and carry flag
SDNode *Sum = DAG.getTargetNode(Opcode, dl, EVT::i32, EVT::Flag,
SDNode *Sum = DAG.getTargetNode(Opcode, dl, MVT::i32, MVT::Flag,
Op.getOperand(0), Op.getOperand(1));
// Store intermediate carry from Sum
SDNode* Carry1 = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, EVT::i32,
SDNode* Carry1 = DAG.getTargetNode(BF::MOVE_cc_ac0, dl, MVT::i32,
/* flag= */ SDValue(Sum, 1));
// Add incoming carry, again producing an output flag
Sum = DAG.getTargetNode(Opcode, dl, EVT::i32, EVT::Flag,
Sum = DAG.getTargetNode(Opcode, dl, MVT::i32, MVT::Flag,
SDValue(Sum, 0), SDValue(CarryIn, 0));
// Update AC0 with the intermediate carry, producing a flag.
SDNode *CarryOut = DAG.getTargetNode(BF::OR_ac0_cc, dl, EVT::Flag,
SDNode *CarryOut = DAG.getTargetNode(BF::OR_ac0_cc, dl, MVT::Flag,
SDValue(Carry1, 0));
// Compose (i32, flag) pair
@ -480,10 +480,10 @@ BlackfinTargetLowering::ReplaceNodeResults(SDNode *N,
// CYCLES2. Reading CYCLES will latch the value of CYCLES2, so we must read
// CYCLES2 last.
SDValue TheChain = N->getOperand(0);
SDValue lo = DAG.getCopyFromReg(TheChain, dl, BF::CYCLES, EVT::i32);
SDValue hi = DAG.getCopyFromReg(lo.getValue(1), dl, BF::CYCLES2, EVT::i32);
SDValue lo = DAG.getCopyFromReg(TheChain, dl, BF::CYCLES, MVT::i32);
SDValue hi = DAG.getCopyFromReg(lo.getValue(1), dl, BF::CYCLES2, MVT::i32);
// Use a buildpair to merge the two 32-bit values into a 64-bit one.
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, lo, hi));
Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, lo, hi));
// Outgoing chain. If we were to use the chain from lo instead, it would be
// possible to entirely eliminate the CYCLES2 read in (i32 (trunc
// readcyclecounter)). Unfortunately this could possibly delay the CYCLES2
@ -559,7 +559,7 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const {
switch (Constraint[0]) {
// Standard constraints
case 'r':
return Pair(0U, VT == EVT::i16 ? D16RegisterClass : DPRegisterClass);
return Pair(0U, VT == MVT::i16 ? D16RegisterClass : DPRegisterClass);
// Blackfin-specific constraints
case 'a': return Pair(0U, PRegisterClass);
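
The READCYCLECOUNTER legalization earlier in this file is a reusable template for any target whose 64-bit result arrives in two 32-bit registers: read both halves on one chain and merge them with BUILD_PAIR. A sketch of that pattern with the registers passed in as parameters; this is a generic illustration, not Blackfin's exact code:

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    // Read LoReg, then HiReg on the chain produced by the first read (so the
    // two reads stay ordered), and glue the i32 halves into one i64 value.
    static SDValue readPairAsI64(SelectionDAG &DAG, SDValue Chain, DebugLoc dl,
                                 unsigned LoReg, unsigned HiReg) {
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, LoReg, MVT::i32);
      SDValue Hi = DAG.getCopyFromReg(Lo.getValue(1), dl, HiReg, MVT::i32);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }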

View File

@ -33,7 +33,7 @@ namespace llvm {
int VarArgsFrameOffset; // Frame offset to start of varargs area.
public:
BlackfinTargetLowering(TargetMachine &TM);
virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);
virtual void ReplaceNodeResults(SDNode *N,
SmallVectorImpl<SDValue> &Results,

View File

@ -42,21 +42,21 @@ def BfinWrapper: SDNode<"BFISD::Wrapper", SDTIntUnaryOp>;
def trailingZeros_xform : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getAPIntValue().countTrailingZeros(),
EVT::i32);
MVT::i32);
}]>;
def trailingOnes_xform : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant(N->getAPIntValue().countTrailingOnes(),
EVT::i32);
MVT::i32);
}]>;
def LO16 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((unsigned short)N->getZExtValue(), EVT::i16);
return CurDAG->getTargetConstant((unsigned short)N->getZExtValue(), MVT::i16);
}]>;
def HI16 : SDNodeXForm<imm, [{
// Transformation function: shift the immediate value down into the low bits.
return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 16, EVT::i16);
return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 16, MVT::i16);
}]>;
//===----------------------------------------------------------------------===//

View File

@ -96,7 +96,7 @@ BlackfinRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, EVT VT) const {
for (regclass_iterator I = regclass_begin(), E = regclass_end();
I != E; ++I) {
const TargetRegisterClass* RC = *I;
if ((VT == EVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
(!BestRC || RC->getNumRegs() < BestRC->getNumRegs()))
BestRC = RC;
}

View File

@ -105,9 +105,9 @@ namespace {
{
EVT vt = CN->getValueType(0);
Imm = (short) CN->getZExtValue();
if (vt.getSimpleVT() >= EVT::i1 && vt.getSimpleVT() <= EVT::i16) {
if (vt.getSimpleVT() >= MVT::i1 && vt.getSimpleVT() <= MVT::i16) {
return true;
} else if (vt == EVT::i32) {
} else if (vt == MVT::i32) {
int32_t i_val = (int32_t) CN->getZExtValue();
short s_val = (short) i_val;
return i_val == s_val;
@ -133,7 +133,7 @@ namespace {
isFPS16Immediate(ConstantFPSDNode *FPN, short &Imm)
{
EVT vt = FPN->getValueType(0);
if (vt == EVT::f32) {
if (vt == MVT::f32) {
int val = FloatToBits(FPN->getValueAPF().convertToFloat());
int sval = (int) ((val << 16) >> 16);
Imm = (short) val;
@ -164,19 +164,19 @@ namespace {
};
const valtype_map_s valtype_map[] = {
{ EVT::i8, SPU::ORBIr8, true, SPU::LRr8 },
{ EVT::i16, SPU::ORHIr16, true, SPU::LRr16 },
{ EVT::i32, SPU::ORIr32, true, SPU::LRr32 },
{ EVT::i64, SPU::ORr64, false, SPU::LRr64 },
{ EVT::f32, SPU::ORf32, false, SPU::LRf32 },
{ EVT::f64, SPU::ORf64, false, SPU::LRf64 },
{ MVT::i8, SPU::ORBIr8, true, SPU::LRr8 },
{ MVT::i16, SPU::ORHIr16, true, SPU::LRr16 },
{ MVT::i32, SPU::ORIr32, true, SPU::LRr32 },
{ MVT::i64, SPU::ORr64, false, SPU::LRr64 },
{ MVT::f32, SPU::ORf32, false, SPU::LRf32 },
{ MVT::f64, SPU::ORf64, false, SPU::LRf64 },
// vector types... (sigh!)
{ EVT::v16i8, 0, false, SPU::LRv16i8 },
{ EVT::v8i16, 0, false, SPU::LRv8i16 },
{ EVT::v4i32, 0, false, SPU::LRv4i32 },
{ EVT::v2i64, 0, false, SPU::LRv2i64 },
{ EVT::v4f32, 0, false, SPU::LRv4f32 },
{ EVT::v2f64, 0, false, SPU::LRv2f64 }
{ MVT::v16i8, 0, false, SPU::LRv16i8 },
{ MVT::v8i16, 0, false, SPU::LRv8i16 },
{ MVT::v4i32, 0, false, SPU::LRv4i32 },
{ MVT::v2i64, 0, false, SPU::LRv2i64 },
{ MVT::v4f32, 0, false, SPU::LRv4f32 },
{ MVT::v2f64, 0, false, SPU::LRv2f64 }
};
const size_t n_valtype_map = sizeof(valtype_map) / sizeof(valtype_map[0]);
@ -211,12 +211,12 @@ namespace {
// Create the shuffle mask for "rotating" the borrow up one register slot
// once the borrow is generated.
ShufBytes.push_back(DAG.getConstant(0x04050607, EVT::i32));
ShufBytes.push_back(DAG.getConstant(0x80808080, EVT::i32));
ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, EVT::i32));
ShufBytes.push_back(DAG.getConstant(0x80808080, EVT::i32));
ShufBytes.push_back(DAG.getConstant(0x04050607, MVT::i32));
ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32));
ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
ShufBytes.push_back(DAG.getConstant(0x80808080, MVT::i32));
return DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
&ShufBytes[0], ShufBytes.size());
}
@ -226,12 +226,12 @@ namespace {
// Create the shuffle mask for "rotating" the borrow up one register slot
// once the borrow is generated.
ShufBytes.push_back(DAG.getConstant(0x04050607, EVT::i32));
ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, EVT::i32));
ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, EVT::i32));
ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, EVT::i32));
ShufBytes.push_back(DAG.getConstant(0x04050607, MVT::i32));
ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32));
ShufBytes.push_back(DAG.getConstant(0x0c0d0e0f, MVT::i32));
ShufBytes.push_back(DAG.getConstant(0xc0c0c0c0, MVT::i32));
return DAG.getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
return DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
&ShufBytes[0], ShufBytes.size());
}
@ -263,13 +263,13 @@ namespace {
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
inline SDValue getI32Imm(uint32_t Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i32);
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
/// getI64Imm - Return a target constant with the specified value, of type
/// i64.
inline SDValue getI64Imm(uint64_t Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i64);
return CurDAG->getTargetConstant(Imm, MVT::i64);
}
/// getSmallIPtrImm - Return a target constant of pointer type.
@ -285,17 +285,17 @@ namespace {
// Check to see if this vector can be represented as a CellSPU immediate
// constant by invoking all of the instruction selection predicates:
if (((vecVT == EVT::v8i16) &&
(SPU::get_vec_i16imm(bvNode, *CurDAG, EVT::i16).getNode() != 0)) ||
((vecVT == EVT::v4i32) &&
((SPU::get_vec_i16imm(bvNode, *CurDAG, EVT::i32).getNode() != 0) ||
(SPU::get_ILHUvec_imm(bvNode, *CurDAG, EVT::i32).getNode() != 0) ||
(SPU::get_vec_u18imm(bvNode, *CurDAG, EVT::i32).getNode() != 0) ||
if (((vecVT == MVT::v8i16) &&
(SPU::get_vec_i16imm(bvNode, *CurDAG, MVT::i16).getNode() != 0)) ||
((vecVT == MVT::v4i32) &&
((SPU::get_vec_i16imm(bvNode, *CurDAG, MVT::i32).getNode() != 0) ||
(SPU::get_ILHUvec_imm(bvNode, *CurDAG, MVT::i32).getNode() != 0) ||
(SPU::get_vec_u18imm(bvNode, *CurDAG, MVT::i32).getNode() != 0) ||
(SPU::get_v4i32_imm(bvNode, *CurDAG).getNode() != 0))) ||
((vecVT == EVT::v2i64) &&
((SPU::get_vec_i16imm(bvNode, *CurDAG, EVT::i64).getNode() != 0) ||
(SPU::get_ILHUvec_imm(bvNode, *CurDAG, EVT::i64).getNode() != 0) ||
(SPU::get_vec_u18imm(bvNode, *CurDAG, EVT::i64).getNode() != 0))))
((vecVT == MVT::v2i64) &&
((SPU::get_vec_i16imm(bvNode, *CurDAG, MVT::i64).getNode() != 0) ||
(SPU::get_ILHUvec_imm(bvNode, *CurDAG, MVT::i64).getNode() != 0) ||
(SPU::get_vec_u18imm(bvNode, *CurDAG, MVT::i64).getNode() != 0))))
return Select(build_vec);
// No, need to emit a constant pool spill:
@ -434,7 +434,7 @@ bool
SPUDAGToDAGISel::SelectAFormAddr(SDValue Op, SDValue N, SDValue &Base,
SDValue &Index) {
// These match the addr256k operand type:
EVT OffsVT = EVT::i16;
EVT OffsVT = MVT::i16;
SDValue Zero = CurDAG->getTargetConstant(0, OffsVT);
switch (N.getOpcode()) {
@ -717,45 +717,45 @@ SPUDAGToDAGISel::Select(SDValue Op) {
TFI, Imm0), 0);
n_ops = 2;
}
} else if (Opc == ISD::Constant && OpVT == EVT::i64) {
} else if (Opc == ISD::Constant && OpVT == MVT::i64) {
// Catch the i64 constants that end up here. Note: The backend doesn't
// attempt to legalize the constant (it's useless because DAGCombiner
// will insert 64-bit constants and we can't stop it).
return SelectI64Constant(Op, OpVT, Op.getDebugLoc());
} else if ((Opc == ISD::ZERO_EXTEND || Opc == ISD::ANY_EXTEND)
&& OpVT == EVT::i64) {
&& OpVT == MVT::i64) {
SDValue Op0 = Op.getOperand(0);
EVT Op0VT = Op0.getValueType();
EVT Op0VecVT = EVT::getVectorVT(Op0VT, (128 / Op0VT.getSizeInBits()));
EVT OpVecVT = EVT::getVectorVT(OpVT, (128 / OpVT.getSizeInBits()));
SDValue shufMask;
switch (Op0VT.getSimpleVT()) {
switch (Op0VT.getSimpleVT().SimpleTy) {
default:
llvm_report_error("CellSPU Select: Unhandled zero/any extend EVT");
/*NOTREACHED*/
case EVT::i32:
shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
CurDAG->getConstant(0x80808080, EVT::i32),
CurDAG->getConstant(0x00010203, EVT::i32),
CurDAG->getConstant(0x80808080, EVT::i32),
CurDAG->getConstant(0x08090a0b, EVT::i32));
case MVT::i32:
shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
CurDAG->getConstant(0x80808080, MVT::i32),
CurDAG->getConstant(0x00010203, MVT::i32),
CurDAG->getConstant(0x80808080, MVT::i32),
CurDAG->getConstant(0x08090a0b, MVT::i32));
break;
case EVT::i16:
shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
CurDAG->getConstant(0x80808080, EVT::i32),
CurDAG->getConstant(0x80800203, EVT::i32),
CurDAG->getConstant(0x80808080, EVT::i32),
CurDAG->getConstant(0x80800a0b, EVT::i32));
case MVT::i16:
shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
CurDAG->getConstant(0x80808080, MVT::i32),
CurDAG->getConstant(0x80800203, MVT::i32),
CurDAG->getConstant(0x80808080, MVT::i32),
CurDAG->getConstant(0x80800a0b, MVT::i32));
break;
case EVT::i8:
shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, EVT::v4i32,
CurDAG->getConstant(0x80808080, EVT::i32),
CurDAG->getConstant(0x80808003, EVT::i32),
CurDAG->getConstant(0x80808080, EVT::i32),
CurDAG->getConstant(0x8080800b, EVT::i32));
case MVT::i8:
shufMask = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
CurDAG->getConstant(0x80808080, MVT::i32),
CurDAG->getConstant(0x80808003, MVT::i32),
CurDAG->getConstant(0x80808080, MVT::i32),
CurDAG->getConstant(0x8080800b, MVT::i32));
break;
}
@ -775,21 +775,21 @@ SPUDAGToDAGISel::Select(SDValue Op) {
SelectCode(CurDAG->getNode(ISD::BIT_CONVERT, dl, OpVecVT, zextShuffle));
return SelectCode(CurDAG->getNode(SPUISD::VEC2PREFSLOT, dl, OpVT,
zextShuffle));
} else if (Opc == ISD::ADD && (OpVT == EVT::i64 || OpVT == EVT::v2i64)) {
} else if (Opc == ISD::ADD && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
SDNode *CGLoad =
emitBuildVector(getCarryGenerateShufMask(*CurDAG, dl));
return SelectCode(CurDAG->getNode(SPUISD::ADD64_MARKER, dl, OpVT,
Op.getOperand(0), Op.getOperand(1),
SDValue(CGLoad, 0)));
} else if (Opc == ISD::SUB && (OpVT == EVT::i64 || OpVT == EVT::v2i64)) {
} else if (Opc == ISD::SUB && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
SDNode *CGLoad =
emitBuildVector(getBorrowGenerateShufMask(*CurDAG, dl));
return SelectCode(CurDAG->getNode(SPUISD::SUB64_MARKER, dl, OpVT,
Op.getOperand(0), Op.getOperand(1),
SDValue(CGLoad, 0)));
} else if (Opc == ISD::MUL && (OpVT == EVT::i64 || OpVT == EVT::v2i64)) {
} else if (Opc == ISD::MUL && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {
SDNode *CGLoad =
emitBuildVector(getCarryGenerateShufMask(*CurDAG, dl));
@ -799,8 +799,8 @@ SPUDAGToDAGISel::Select(SDValue Op) {
} else if (Opc == ISD::TRUNCATE) {
SDValue Op0 = Op.getOperand(0);
if ((Op0.getOpcode() == ISD::SRA || Op0.getOpcode() == ISD::SRL)
&& OpVT == EVT::i32
&& Op0.getValueType() == EVT::i64) {
&& OpVT == MVT::i32
&& Op0.getValueType() == MVT::i64) {
// Catch (truncate:i32 ([sra|srl]:i64 arg, c), where c >= 32
//
// Take advantage of the fact that the upper 32 bits are in the
@ -817,7 +817,7 @@ SPUDAGToDAGISel::Select(SDValue Op) {
shift_amt -= 32;
if (shift_amt > 0) {
// Take care of the additional shift, if present:
SDValue shift = CurDAG->getTargetConstant(shift_amt, EVT::i32);
SDValue shift = CurDAG->getTargetConstant(shift_amt, MVT::i32);
unsigned Opc = SPU::ROTMAIr32_i32;
if (Op0.getOpcode() == ISD::SRL)
@ -832,19 +832,19 @@ SPUDAGToDAGISel::Select(SDValue Op) {
}
}
} else if (Opc == ISD::SHL) {
if (OpVT == EVT::i64) {
if (OpVT == MVT::i64) {
return SelectSHLi64(Op, OpVT);
}
} else if (Opc == ISD::SRL) {
if (OpVT == EVT::i64) {
if (OpVT == MVT::i64) {
return SelectSRLi64(Op, OpVT);
}
} else if (Opc == ISD::SRA) {
if (OpVT == EVT::i64) {
if (OpVT == MVT::i64) {
return SelectSRAi64(Op, OpVT);
}
} else if (Opc == ISD::FNEG
&& (OpVT == EVT::f64 || OpVT == EVT::v2f64)) {
&& (OpVT == MVT::f64 || OpVT == MVT::v2f64)) {
DebugLoc dl = Op.getDebugLoc();
// Check if the pattern is a special form of DFNMS:
// (fneg (fsub (fmul R64FP:$rA, R64FP:$rB), R64FP:$rC))
@ -853,7 +853,7 @@ SPUDAGToDAGISel::Select(SDValue Op) {
SDValue Op00 = Op0.getOperand(0);
if (Op00.getOpcode() == ISD::FMUL) {
unsigned Opc = SPU::DFNMSf64;
if (OpVT == EVT::v2f64)
if (OpVT == MVT::v2f64)
Opc = SPU::DFNMSv2f64;
return CurDAG->getTargetNode(Opc, dl, OpVT,
@ -863,29 +863,29 @@ SPUDAGToDAGISel::Select(SDValue Op) {
}
}
SDValue negConst = CurDAG->getConstant(0x8000000000000000ULL, EVT::i64);
SDValue negConst = CurDAG->getConstant(0x8000000000000000ULL, MVT::i64);
SDNode *signMask = 0;
unsigned Opc = SPU::XORfneg64;
if (OpVT == EVT::f64) {
signMask = SelectI64Constant(negConst, EVT::i64, dl);
} else if (OpVT == EVT::v2f64) {
if (OpVT == MVT::f64) {
signMask = SelectI64Constant(negConst, MVT::i64, dl);
} else if (OpVT == MVT::v2f64) {
Opc = SPU::XORfnegvec;
signMask = emitBuildVector(CurDAG->getNode(ISD::BUILD_VECTOR, dl,
EVT::v2i64,
MVT::v2i64,
negConst, negConst));
}
return CurDAG->getTargetNode(Opc, dl, OpVT,
Op.getOperand(0), SDValue(signMask, 0));
} else if (Opc == ISD::FABS) {
if (OpVT == EVT::f64) {
SDNode *signMask = SelectI64Constant(0x7fffffffffffffffULL, EVT::i64, dl);
if (OpVT == MVT::f64) {
SDNode *signMask = SelectI64Constant(0x7fffffffffffffffULL, MVT::i64, dl);
return CurDAG->getTargetNode(SPU::ANDfabs64, dl, OpVT,
Op.getOperand(0), SDValue(signMask, 0));
} else if (OpVT == EVT::v2f64) {
SDValue absConst = CurDAG->getConstant(0x7fffffffffffffffULL, EVT::i64);
SDValue absVec = CurDAG->getNode(ISD::BUILD_VECTOR, dl, EVT::v2i64,
} else if (OpVT == MVT::v2f64) {
SDValue absConst = CurDAG->getConstant(0x7fffffffffffffffULL, MVT::i64);
SDValue absVec = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,
absConst, absConst);
SDNode *signMask = emitBuildVector(absVec);
return CurDAG->getTargetNode(SPU::ANDfabsvec, dl, OpVT,
@ -911,9 +911,9 @@ SPUDAGToDAGISel::Select(SDValue Op) {
if (vtm->ldresult_imm) {
SDValue Zero = CurDAG->getTargetConstant(0, VT);
Result = CurDAG->getTargetNode(Opc, dl, VT, EVT::Other, Arg, Zero, Chain);
Result = CurDAG->getTargetNode(Opc, dl, VT, MVT::Other, Arg, Zero, Chain);
} else {
Result = CurDAG->getTargetNode(Opc, dl, VT, EVT::Other, Arg, Arg, Chain);
Result = CurDAG->getTargetNode(Opc, dl, VT, MVT::Other, Arg, Arg, Chain);
}
return Result;
@ -977,7 +977,7 @@ SPUDAGToDAGISel::SelectSHLi64(SDValue &Op, EVT OpVT) {
DebugLoc dl = Op.getDebugLoc();
VecOp0 = CurDAG->getTargetNode(SPU::ORv2i64_i64, dl, VecVT, Op0);
SelMaskVal = CurDAG->getTargetConstant(0xff00ULL, EVT::i16);
SelMaskVal = CurDAG->getTargetConstant(0xff00ULL, MVT::i16);
SelMask = CurDAG->getTargetNode(SPU::FSMBIv2i64, dl, VecVT, SelMaskVal);
ZeroFill = CurDAG->getTargetNode(SPU::ILv2i64, dl, VecVT,
CurDAG->getTargetConstant(0, OpVT));
@ -1110,16 +1110,16 @@ SPUDAGToDAGISel::SelectSRAi64(SDValue &Op, EVT OpVT) {
SDValue SignRotAmt = CurDAG->getTargetConstant(31, ShiftAmtVT);
SDNode *SignRot =
CurDAG->getTargetNode(SPU::ROTMAIv2i64_i32, dl, EVT::v2i64,
CurDAG->getTargetNode(SPU::ROTMAIv2i64_i32, dl, MVT::v2i64,
SDValue(VecOp0, 0), SignRotAmt);
SDNode *UpperHalfSign =
CurDAG->getTargetNode(SPU::ORi32_v4i32, dl, EVT::i32, SDValue(SignRot, 0));
CurDAG->getTargetNode(SPU::ORi32_v4i32, dl, MVT::i32, SDValue(SignRot, 0));
SDNode *UpperHalfSignMask =
CurDAG->getTargetNode(SPU::FSM64r32, dl, VecVT, SDValue(UpperHalfSign, 0));
SDNode *UpperLowerMask =
CurDAG->getTargetNode(SPU::FSMBIv2i64, dl, VecVT,
CurDAG->getTargetConstant(0xff00ULL, EVT::i16));
CurDAG->getTargetConstant(0xff00ULL, MVT::i16));
SDNode *UpperLowerSelect =
CurDAG->getTargetNode(SPU::SELBv2i64, dl, VecVT,
SDValue(UpperHalfSignMask, 0),
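
Tables like valtype_map earlier in this file show the other reason the plain enum survives the split: a simple value type is a compact, directly comparable key. A sketch of the lookup such a table implies; the entry layout and opcode numbers are invented here, not the SPU ones:

    #include "llvm/CodeGen/ValueTypes.h"
    #include <cstddef>
    using namespace llvm;

    struct valtype_entry {
      MVT::SimpleValueType valtype;  // key: the raw simple-type enum
      unsigned load_opcode;          // placeholder payload for this sketch
    };

    static const valtype_entry table[] = {
      { MVT::i32, 100 },             // invented opcode numbers
      { MVT::i64, 101 },
      { MVT::f64, 102 },
    };

    // Linear scan keyed on the SimpleValueType of a (simple) EVT.
    static const valtype_entry *lookupEntry(EVT VT) {
      for (size_t i = 0; i != sizeof(table) / sizeof(table[0]); ++i)
        if (table[i].valtype == VT.getSimpleVT().SimpleTy)
          return &table[i];
      return 0;                      // no entry for this type
    }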

File diff suppressed because it is too large

View File

@ -109,7 +109,7 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the ValueType for ISD::SETCC
virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
//! Custom lowering hooks
virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG);

View File

@ -144,7 +144,7 @@ def imm18 : PatLeaf<(imm), [{
def lo16 : PatLeaf<(imm), [{
// lo16 predicate - returns true if the immediate has all zeros in the
// high order bits and is a 32-bit constant:
if (N->getValueType(0) == EVT::i32) {
if (N->getValueType(0) == MVT::i32) {
uint32_t val = N->getZExtValue();
return ((val & 0x0000ffff) == val);
}
@ -155,10 +155,10 @@ def lo16 : PatLeaf<(imm), [{
def hi16 : PatLeaf<(imm), [{
// hi16 predicate - returns true if the immediate has all zeros in the
// low order bits and is a 32-bit constant:
if (N->getValueType(0) == EVT::i32) {
if (N->getValueType(0) == MVT::i32) {
uint32_t val = uint32_t(N->getZExtValue());
return ((val & 0xffff0000) == val);
} else if (N->getValueType(0) == EVT::i64) {
} else if (N->getValueType(0) == MVT::i64) {
uint64_t val = N->getZExtValue();
return ((val & 0xffff0000ULL) == val);
}
@ -208,7 +208,7 @@ def fpimmSExt16 : PatLeaf<(fpimm), [{
// Does the SFP constant only have upper 16 bits set?
def hi16_f32 : PatLeaf<(fpimm), [{
if (N->getValueType(0) == EVT::f32) {
if (N->getValueType(0) == MVT::f32) {
uint32_t val = FloatToBits(N->getValueAPF().convertToFloat());
return ((val & 0xffff0000) == val);
}
@ -218,7 +218,7 @@ def hi16_f32 : PatLeaf<(fpimm), [{
// Does the SFP constant fit into 18 bits?
def fpimm18 : PatLeaf<(fpimm), [{
if (N->getValueType(0) == EVT::f32) {
if (N->getValueType(0) == MVT::f32) {
uint32_t Value = FloatToBits(N->getValueAPF().convertToFloat());
return ((Value & ((1 << 19) - 1)) == Value);
}
@ -238,7 +238,7 @@ def fpimm18 : PatLeaf<(fpimm), [{
// immediate constant load for v16i8 vectors. N.B.: The incoming constant has
// to be a 16-bit quantity with the upper and lower bytes equal (e.g., 0x2a2a).
def v16i8SExt8Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i8imm(N, *CurDAG, EVT::i8);
return SPU::get_vec_i8imm(N, *CurDAG, MVT::i8);
}]>;
// v16i8SExt8Imm: Predicate test for 8-bit sign extended immediate constant
@ -246,14 +246,14 @@ def v16i8SExt8Imm_xform: SDNodeXForm<build_vector, [{
// incoming constant being a 16-bit quantity, where the upper and lower bytes
// are EXACTLY the same (e.g., 0x2a2a)
def v16i8SExt8Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i8imm(N, *CurDAG, EVT::i8).getNode() != 0;
return SPU::get_vec_i8imm(N, *CurDAG, MVT::i8).getNode() != 0;
}], v16i8SExt8Imm_xform>;
// v16i8U8Imm_xform function: convert build_vector to unsigned 8-bit
// immediate constant load for v16i8 vectors. N.B.: The incoming constant has
// to be a 16-bit quantity with the upper and lower bytes equal (e.g., 0x2a2a).
def v16i8U8Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i8imm(N, *CurDAG, EVT::i8);
return SPU::get_vec_i8imm(N, *CurDAG, MVT::i8);
}]>;
// v16i8U8Imm: Predicate test for unsigned 8-bit immediate constant
@ -261,114 +261,114 @@ def v16i8U8Imm_xform: SDNodeXForm<build_vector, [{
// incoming constant being a 16-bit quantity, where the upper and lower bytes
// are EXACTLY the same (e.g., 0x2a2a)
def v16i8U8Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i8imm(N, *CurDAG, EVT::i8).getNode() != 0;
return SPU::get_vec_i8imm(N, *CurDAG, MVT::i8).getNode() != 0;
}], v16i8U8Imm_xform>;
// v8i16SExt8Imm_xform function: convert build_vector to 8-bit sign extended
// immediate constant load for v8i16 vectors.
def v8i16SExt8Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i8imm(N, *CurDAG, EVT::i16);
return SPU::get_vec_i8imm(N, *CurDAG, MVT::i16);
}]>;
// v8i16SExt8Imm: Predicate test for 8-bit sign extended immediate constant
// load, works in conjunction with its transform function.
def v8i16SExt8Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i8imm(N, *CurDAG, EVT::i16).getNode() != 0;
return SPU::get_vec_i8imm(N, *CurDAG, MVT::i16).getNode() != 0;
}], v8i16SExt8Imm_xform>;
// v8i16SExt10Imm_xform function: convert build_vector to 16-bit sign extended
// immediate constant load for v8i16 vectors.
def v8i16SExt10Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i16);
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i16);
}]>;
// v8i16SExt10Imm: Predicate test for 16-bit sign extended immediate constant
// load, works in conjunction with its transform function.
def v8i16SExt10Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i16).getNode() != 0;
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i16).getNode() != 0;
}], v8i16SExt10Imm_xform>;
// v8i16Uns10Imm_xform function: convert build_vector to 16-bit unsigned
// immediate constant load for v8i16 vectors.
def v8i16Uns10Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i16);
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i16);
}]>;
// v8i16Uns10Imm: Predicate test for 16-bit unsigned immediate constant
// load, works in conjunction with its transform function.
def v8i16Uns10Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i16).getNode() != 0;
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i16).getNode() != 0;
}], v8i16Uns10Imm_xform>;
// v8i16SExt16Imm_xform function: convert build_vector to 16-bit sign extended
// immediate constant load for v8i16 vectors.
def v8i16Uns16Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i16imm(N, *CurDAG, EVT::i16);
return SPU::get_vec_i16imm(N, *CurDAG, MVT::i16);
}]>;
// v8i16SExt16Imm: Predicate test for 16-bit sign extended immediate constant
// load, works in conjunction with its transform function.
def v8i16SExt16Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i16imm(N, *CurDAG, EVT::i16).getNode() != 0;
return SPU::get_vec_i16imm(N, *CurDAG, MVT::i16).getNode() != 0;
}], v8i16Uns16Imm_xform>;
// v4i32SExt10Imm_xform function: convert build_vector to 10-bit sign extended
// immediate constant load for v4i32 vectors.
def v4i32SExt10Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i32);
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i32);
}]>;
// v4i32SExt10Imm: Predicate test for 10-bit sign extended immediate constant
// load, works in conjunction with its transform function.
def v4i32SExt10Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i32).getNode() != 0;
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i32).getNode() != 0;
}], v4i32SExt10Imm_xform>;
// v4i32Uns10Imm_xform function: convert build_vector to 10-bit unsigned
// immediate constant load for v4i32 vectors.
def v4i32Uns10Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i32);
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i32);
}]>;
// v4i32Uns10Imm: Predicate test for 10-bit unsigned immediate constant
// load, works in conjunction with its transform function.
def v4i32Uns10Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i32).getNode() != 0;
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i32).getNode() != 0;
}], v4i32Uns10Imm_xform>;
// v4i32SExt16Imm_xform function: convert build_vector to 16-bit sign extended
// immediate constant load for v4i32 vectors.
def v4i32SExt16Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i16imm(N, *CurDAG, EVT::i32);
return SPU::get_vec_i16imm(N, *CurDAG, MVT::i32);
}]>;
// v4i32SExt16Imm: Predicate test for 16-bit sign extended immediate constant
// load, works in conjunction with its transform function.
def v4i32SExt16Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i16imm(N, *CurDAG, EVT::i32).getNode() != 0;
return SPU::get_vec_i16imm(N, *CurDAG, MVT::i32).getNode() != 0;
}], v4i32SExt16Imm_xform>;
// v4i32Uns18Imm_xform function: convert build_vector to 18-bit unsigned
// immediate constant load for v4i32 vectors.
def v4i32Uns18Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_u18imm(N, *CurDAG, EVT::i32);
return SPU::get_vec_u18imm(N, *CurDAG, MVT::i32);
}]>;
// v4i32Uns18Imm: Predicate test for 18-bit unsigned immediate constant load,
// works in conjunction with its transform function.
def v4i32Uns18Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_u18imm(N, *CurDAG, EVT::i32).getNode() != 0;
return SPU::get_vec_u18imm(N, *CurDAG, MVT::i32).getNode() != 0;
}], v4i32Uns18Imm_xform>;
// ILHUvec_get_imm xform function: convert build_vector to ILHUvec imm constant
// load.
def ILHUvec_get_imm: SDNodeXForm<build_vector, [{
return SPU::get_ILHUvec_imm(N, *CurDAG, EVT::i32);
return SPU::get_ILHUvec_imm(N, *CurDAG, MVT::i32);
}]>;
/// immILHUvec: Predicate test for an ILHU constant vector.
def immILHUvec: PatLeaf<(build_vector), [{
return SPU::get_ILHUvec_imm(N, *CurDAG, EVT::i32).getNode() != 0;
return SPU::get_ILHUvec_imm(N, *CurDAG, MVT::i32).getNode() != 0;
}], ILHUvec_get_imm>;
// Catch-all for any other i32 vector constants
@ -383,42 +383,42 @@ def v4i32Imm: PatLeaf<(build_vector), [{
// v2i64SExt10Imm_xform function: convert build_vector to 10-bit sign extended
// immediate constant load for v2i64 vectors.
def v2i64SExt10Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i64);
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i64);
}]>;
// v2i64SExt10Imm: Predicate test for 10-bit sign extended immediate constant
// load, works in conjunction with its transform function.
def v2i64SExt10Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i10imm(N, *CurDAG, EVT::i64).getNode() != 0;
return SPU::get_vec_i10imm(N, *CurDAG, MVT::i64).getNode() != 0;
}], v2i64SExt10Imm_xform>;
// v2i64SExt16Imm_xform function: convert build_vector to 16-bit sign extended
// immediate constant load for v2i64 vectors.
def v2i64SExt16Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_i16imm(N, *CurDAG, EVT::i64);
return SPU::get_vec_i16imm(N, *CurDAG, MVT::i64);
}]>;
// v2i64SExt16Imm: Predicate test for 16-bit sign extended immediate constant
// load, works in conjunction with its transform function.
def v2i64SExt16Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_i16imm(N, *CurDAG, EVT::i64).getNode() != 0;
return SPU::get_vec_i16imm(N, *CurDAG, MVT::i64).getNode() != 0;
}], v2i64SExt16Imm_xform>;
// v2i64Uns18Imm_xform function: convert build_vector to 18-bit unsigned
// immediate constant load for v2i64 vectors.
def v2i64Uns18Imm_xform: SDNodeXForm<build_vector, [{
return SPU::get_vec_u18imm(N, *CurDAG, EVT::i64);
return SPU::get_vec_u18imm(N, *CurDAG, MVT::i64);
}]>;
// v2i64Uns18Imm: Predicate test for 18-bit unsigned immediate constant load,
// works in conjunction with its transform function.
def v2i64Uns18Imm: PatLeaf<(build_vector), [{
return SPU::get_vec_u18imm(N, *CurDAG, EVT::i64).getNode() != 0;
return SPU::get_vec_u18imm(N, *CurDAG, MVT::i64).getNode() != 0;
}], v2i64Uns18Imm_xform>;
/// immILHUvec: Predicate test for an ILHU constant vector.
def immILHUvec_i64: PatLeaf<(build_vector), [{
return SPU::get_ILHUvec_imm(N, *CurDAG, EVT::i64).getNode() != 0;
return SPU::get_ILHUvec_imm(N, *CurDAG, MVT::i64).getNode() != 0;
}], ILHUvec_get_imm>;
// Catch-all for any other i32 vector constants


@ -78,8 +78,8 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue Op, SDValue Addr,
SDValue &Base, SDValue &Disp) {
// Try to match frame address first.
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i16);
Disp = CurDAG->getTargetConstant(0, EVT::i16);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i16);
Disp = CurDAG->getTargetConstant(0, MVT::i16);
return true;
}
@ -92,11 +92,11 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue Op, SDValue Addr,
if (((CVal << 48) >> 48) == CVal) {
SDValue N0 = Addr.getOperand(0);
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(N0))
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i16);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i16);
else
Base = N0;
Disp = CurDAG->getTargetConstant(CVal, EVT::i16);
Disp = CurDAG->getTargetConstant(CVal, MVT::i16);
return true;
}
}
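
The ((CVal << 48) >> 48) == CVal comparison above is a fits-in-signed-16-bits test: shifting up and arithmetically back down sign-extends bit 15. A standalone re-expression of the same predicate (plain C++, kept free of the implementation-defined shift):

#include <cassert>
#include <cstdint>

static bool fitsInSigned16(int64_t c) {
  // Keep the low 16 bits, sign-extend them, and require that this round
  // trip preserves the original value.
  int64_t low16 = (c & 0x7fff) - (c & 0x8000);
  return low16 == c;
}

int main() {
  assert(fitsInSigned16(-1));       // all-ones sign-extends back to -1
  assert(fitsInSigned16(32767));    // INT16_MAX fits
  assert(!fitsInSigned16(32768));   // one past INT16_MAX does not
  return 0;
}
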
@ -105,18 +105,18 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue Op, SDValue Addr,
SDValue N0 = Addr.getOperand(0);
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
Base = CurDAG->getTargetGlobalAddress(G->getGlobal(),
EVT::i16, G->getOffset());
Disp = CurDAG->getTargetConstant(0, EVT::i16);
MVT::i16, G->getOffset());
Disp = CurDAG->getTargetConstant(0, MVT::i16);
return true;
} else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(N0)) {
Base = CurDAG->getTargetExternalSymbol(E->getSymbol(), EVT::i16);
Disp = CurDAG->getTargetConstant(0, EVT::i16);
Base = CurDAG->getTargetExternalSymbol(E->getSymbol(), MVT::i16);
Disp = CurDAG->getTargetConstant(0, MVT::i16);
}
break;
};
Base = Addr;
Disp = CurDAG->getTargetConstant(0, EVT::i16);
Disp = CurDAG->getTargetConstant(0, MVT::i16);
return true;
}
@ -168,14 +168,14 @@ SDNode *MSP430DAGToDAGISel::Select(SDValue Op) {
switch (Node->getOpcode()) {
default: break;
case ISD::FrameIndex: {
assert(Op.getValueType() == EVT::i16);
assert(Op.getValueType() == MVT::i16);
int FI = cast<FrameIndexSDNode>(Node)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, EVT::i16);
SDValue TFI = CurDAG->getTargetFrameIndex(FI, MVT::i16);
if (Node->hasOneUse())
return CurDAG->SelectNodeTo(Node, MSP430::ADD16ri, EVT::i16,
TFI, CurDAG->getTargetConstant(0, EVT::i16));
return CurDAG->getTargetNode(MSP430::ADD16ri, dl, EVT::i16,
TFI, CurDAG->getTargetConstant(0, EVT::i16));
return CurDAG->SelectNodeTo(Node, MSP430::ADD16ri, MVT::i16,
TFI, CurDAG->getTargetConstant(0, MVT::i16));
return CurDAG->getTargetNode(MSP430::ADD16ri, dl, MVT::i16,
TFI, CurDAG->getTargetConstant(0, MVT::i16));
}
}


@ -42,8 +42,8 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
Subtarget(*tm.getSubtargetImpl()), TM(tm) {
// Set up the register classes.
addRegisterClass(EVT::i8, MSP430::GR8RegisterClass);
addRegisterClass(EVT::i16, MSP430::GR16RegisterClass);
addRegisterClass(MVT::i8, MSP430::GR8RegisterClass);
addRegisterClass(MVT::i16, MSP430::GR16RegisterClass);
// Compute derived properties from the register classes
computeRegisterProperties();
@ -55,75 +55,75 @@ MSP430TargetLowering::MSP430TargetLowering(MSP430TargetMachine &tm) :
// Even though we have only a 1-bit shift here, we can perform
// shifts of the whole bitwidth one bit per step.
setShiftAmountType(EVT::i8);
setShiftAmountType(MVT::i8);
setStackPointerRegisterToSaveRestore(MSP430::SPW);
setBooleanContents(ZeroOrOneBooleanContent);
setSchedulingPreference(SchedulingForLatency);
setLoadExtAction(ISD::EXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, EVT::i8, Expand);
setLoadExtAction(ISD::SEXTLOAD, EVT::i16, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::i16, Expand);
// We don't have any truncstores
setTruncStoreAction(EVT::i16, EVT::i8, Expand);
setTruncStoreAction(MVT::i16, MVT::i8, Expand);
setOperationAction(ISD::SRA, EVT::i8, Custom);
setOperationAction(ISD::SHL, EVT::i8, Custom);
setOperationAction(ISD::SRL, EVT::i8, Custom);
setOperationAction(ISD::SRA, EVT::i16, Custom);
setOperationAction(ISD::SHL, EVT::i16, Custom);
setOperationAction(ISD::SRL, EVT::i16, Custom);
setOperationAction(ISD::ROTL, EVT::i8, Expand);
setOperationAction(ISD::ROTR, EVT::i8, Expand);
setOperationAction(ISD::ROTL, EVT::i16, Expand);
setOperationAction(ISD::ROTR, EVT::i16, Expand);
setOperationAction(ISD::GlobalAddress, EVT::i16, Custom);
setOperationAction(ISD::ExternalSymbol, EVT::i16, Custom);
setOperationAction(ISD::BR_JT, EVT::Other, Expand);
setOperationAction(ISD::BRIND, EVT::Other, Expand);
setOperationAction(ISD::BR_CC, EVT::i8, Custom);
setOperationAction(ISD::BR_CC, EVT::i16, Custom);
setOperationAction(ISD::BRCOND, EVT::Other, Expand);
setOperationAction(ISD::SETCC, EVT::i8, Expand);
setOperationAction(ISD::SETCC, EVT::i16, Expand);
setOperationAction(ISD::SELECT, EVT::i8, Expand);
setOperationAction(ISD::SELECT, EVT::i16, Expand);
setOperationAction(ISD::SELECT_CC, EVT::i8, Custom);
setOperationAction(ISD::SELECT_CC, EVT::i16, Custom);
setOperationAction(ISD::SIGN_EXTEND, EVT::i16, Custom);
setOperationAction(ISD::SRA, MVT::i8, Custom);
setOperationAction(ISD::SHL, MVT::i8, Custom);
setOperationAction(ISD::SRL, MVT::i8, Custom);
setOperationAction(ISD::SRA, MVT::i16, Custom);
setOperationAction(ISD::SHL, MVT::i16, Custom);
setOperationAction(ISD::SRL, MVT::i16, Custom);
setOperationAction(ISD::ROTL, MVT::i8, Expand);
setOperationAction(ISD::ROTR, MVT::i8, Expand);
setOperationAction(ISD::ROTL, MVT::i16, Expand);
setOperationAction(ISD::ROTR, MVT::i16, Expand);
setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
setOperationAction(ISD::ExternalSymbol, MVT::i16, Custom);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BRIND, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::i8, Custom);
setOperationAction(ISD::BR_CC, MVT::i16, Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Expand);
setOperationAction(ISD::SETCC, MVT::i8, Expand);
setOperationAction(ISD::SETCC, MVT::i16, Expand);
setOperationAction(ISD::SELECT, MVT::i8, Expand);
setOperationAction(ISD::SELECT, MVT::i16, Expand);
setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i16, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::i16, Custom);
setOperationAction(ISD::CTTZ, EVT::i8, Expand);
setOperationAction(ISD::CTTZ, EVT::i16, Expand);
setOperationAction(ISD::CTLZ, EVT::i8, Expand);
setOperationAction(ISD::CTLZ, EVT::i16, Expand);
setOperationAction(ISD::CTPOP, EVT::i8, Expand);
setOperationAction(ISD::CTPOP, EVT::i16, Expand);
setOperationAction(ISD::CTTZ, MVT::i8, Expand);
setOperationAction(ISD::CTTZ, MVT::i16, Expand);
setOperationAction(ISD::CTLZ, MVT::i8, Expand);
setOperationAction(ISD::CTLZ, MVT::i16, Expand);
setOperationAction(ISD::CTPOP, MVT::i8, Expand);
setOperationAction(ISD::CTPOP, MVT::i16, Expand);
setOperationAction(ISD::SHL_PARTS, EVT::i8, Expand);
setOperationAction(ISD::SHL_PARTS, EVT::i16, Expand);
setOperationAction(ISD::SRL_PARTS, EVT::i8, Expand);
setOperationAction(ISD::SRL_PARTS, EVT::i16, Expand);
setOperationAction(ISD::SRA_PARTS, EVT::i8, Expand);
setOperationAction(ISD::SRA_PARTS, EVT::i16, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i8, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i16, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i8, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i16, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i8, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i16, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
// FIXME: Implement multiplication by a constant efficiently
setOperationAction(ISD::MUL, EVT::i16, Expand);
setOperationAction(ISD::MULHS, EVT::i16, Expand);
setOperationAction(ISD::MULHU, EVT::i16, Expand);
setOperationAction(ISD::SMUL_LOHI, EVT::i16, Expand);
setOperationAction(ISD::UMUL_LOHI, EVT::i16, Expand);
setOperationAction(ISD::MUL, MVT::i16, Expand);
setOperationAction(ISD::MULHS, MVT::i16, Expand);
setOperationAction(ISD::MULHU, MVT::i16, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i16, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i16, Expand);
setOperationAction(ISD::UDIV, EVT::i16, Expand);
setOperationAction(ISD::UDIVREM, EVT::i16, Expand);
setOperationAction(ISD::UREM, EVT::i16, Expand);
setOperationAction(ISD::SDIV, EVT::i16, Expand);
setOperationAction(ISD::SDIVREM, EVT::i16, Expand);
setOperationAction(ISD::SREM, EVT::i16, Expand);
setOperationAction(ISD::UDIV, MVT::i16, Expand);
setOperationAction(ISD::UDIVREM, MVT::i16, Expand);
setOperationAction(ISD::UREM, MVT::i16, Expand);
setOperationAction(ISD::SDIV, MVT::i16, Expand);
setOperationAction(ISD::SDIVREM, MVT::i16, Expand);
setOperationAction(ISD::SREM, MVT::i16, Expand);
}
SDValue MSP430TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
@ -221,16 +221,16 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
if (VA.isRegLoc()) {
// Arguments passed in registers
EVT RegVT = VA.getLocVT();
switch (RegVT.getSimpleVT()) {
switch (RegVT.getSimpleVT().SimpleTy) {
default:
{
#ifndef NDEBUG
cerr << "LowerFormalArguments Unhandled argument type: "
<< RegVT.getSimpleVT() << "\n";
<< RegVT.getSimpleVT().SimpleTy << "\n";
#endif
llvm_unreachable(0);
}
case EVT::i16:
case MVT::i16:
unsigned VReg =
RegInfo.createVirtualRegister(MSP430::GR16RegisterClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
@ -258,7 +258,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
if (ObjSize > 2) {
cerr << "LowerFormalArguments Unhandled argument type: "
<< VA.getLocVT().getSimpleVT()
<< VA.getLocVT().getSimpleVT().SimpleTy
<< "\n";
}
// Create the frame index object for this incoming parameter...
@ -266,7 +266,7 @@ MSP430TargetLowering::LowerCCCArguments(SDValue Chain,
// Create the SelectionDAG nodes corresponding to a load
// from this parameter
SDValue FIN = DAG.getFrameIndex(FI, EVT::i16);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i16);
InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
PseudoSourceValue::getFixedStack(FI), 0));
}
@ -315,10 +315,10 @@ MSP430TargetLowering::LowerReturn(SDValue Chain,
}
if (Flag.getNode())
return DAG.getNode(MSP430ISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
return DAG.getNode(MSP430ISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
// Return Void
return DAG.getNode(MSP430ISD::RET_FLAG, dl, EVT::Other, Chain);
return DAG.getNode(MSP430ISD::RET_FLAG, dl, MVT::Other, Chain);
}
/// LowerCCCCallTo - function arguments are copied from virtual regs to
@ -395,7 +395,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because all store nodes are
// independent of each other.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token chain and
@ -412,12 +412,12 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i16);
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i16);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), EVT::i16);
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i16);
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@ -564,7 +564,7 @@ static SDValue EmitCMP(SDValue &LHS, SDValue &RHS, unsigned &TargetCC,
break;
}
return DAG.getNode(MSP430ISD::CMP, dl, EVT::Flag, LHS, RHS);
return DAG.getNode(MSP430ISD::CMP, dl, MVT::Flag, LHS, RHS);
}
@ -581,7 +581,7 @@ SDValue MSP430TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(MSP430ISD::BR_CC, dl, Op.getValueType(),
Chain,
Dest, DAG.getConstant(TargetCC, EVT::i8),
Dest, DAG.getConstant(TargetCC, MVT::i8),
Flag);
}
@ -596,11 +596,11 @@ SDValue MSP430TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
unsigned TargetCC = MSP430::COND_INVALID;
SDValue Flag = EmitCMP(LHS, RHS, TargetCC, CC, dl, DAG);
SDVTList VTs = DAG.getVTList(Op.getValueType(), EVT::Flag);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
SmallVector<SDValue, 4> Ops;
Ops.push_back(TrueV);
Ops.push_back(FalseV);
Ops.push_back(DAG.getConstant(TargetCC, EVT::i8));
Ops.push_back(DAG.getConstant(TargetCC, MVT::i8));
Ops.push_back(Flag);
return DAG.getNode(MSP430ISD::SELECT_CC, dl, VTs, &Ops[0], Ops.size());
@ -612,7 +612,7 @@ SDValue MSP430TargetLowering::LowerSIGN_EXTEND(SDValue Op,
EVT VT = Op.getValueType();
DebugLoc dl = Op.getDebugLoc();
assert(VT == EVT::i16 && "Only support i16 for now!");
assert(VT == MVT::i16 && "Only support i16 for now!");
return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT,
DAG.getNode(ISD::ANY_EXTEND, dl, VT, Val),


@ -95,7 +95,7 @@ private:
// getI32Imm - Return a target constant with the specified
// value, of type i32.
inline SDValue getI32Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i32);
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
@ -142,8 +142,8 @@ SelectAddr(SDValue Op, SDValue Addr, SDValue &Offset, SDValue &Base)
{
// if Address is FI, get the TargetFrameIndex.
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
Offset = CurDAG->getTargetConstant(0, EVT::i32);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -151,7 +151,7 @@ SelectAddr(SDValue Op, SDValue Addr, SDValue &Offset, SDValue &Base)
if (TM.getRelocationModel() == Reloc::PIC_) {
if ((Addr.getOpcode() == ISD::TargetGlobalAddress) ||
(Addr.getOpcode() == ISD::TargetJumpTable)){
Base = CurDAG->getRegister(Mips::GP, EVT::i32);
Base = CurDAG->getRegister(Mips::GP, MVT::i32);
Offset = Addr;
return true;
}
@ -169,19 +169,19 @@ SelectAddr(SDValue Op, SDValue Addr, SDValue &Offset, SDValue &Base)
// If the first operand is a FI, get the TargetFI Node
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>
(Addr.getOperand(0))) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
} else {
Base = Addr.getOperand(0);
}
Offset = CurDAG->getTargetConstant(CN->getZExtValue(), EVT::i32);
Offset = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
return true;
}
}
}
Base = Addr;
Offset = CurDAG->getTargetConstant(0, EVT::i32);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -248,7 +248,7 @@ Select(SDValue N)
SDNode *AddCarry = CurDAG->getTargetNode(Mips::ADDu, dl, VT,
SDValue(Carry,0), RHS);
return CurDAG->SelectNodeTo(N.getNode(), MOp, VT, EVT::Flag,
return CurDAG->SelectNodeTo(N.getNode(), MOp, VT, MVT::Flag,
LHS, SDValue(AddCarry,0));
}
@ -266,13 +266,13 @@ Select(SDValue N)
else
Op = (Opcode == ISD::UDIVREM ? Mips::DIVu : Mips::DIV);
SDNode *Node = CurDAG->getTargetNode(Op, dl, EVT::Flag, Op1, Op2);
SDNode *Node = CurDAG->getTargetNode(Op, dl, MVT::Flag, Op1, Op2);
SDValue InFlag = SDValue(Node, 0);
SDNode *Lo = CurDAG->getTargetNode(Mips::MFLO, dl, EVT::i32,
EVT::Flag, InFlag);
SDNode *Lo = CurDAG->getTargetNode(Mips::MFLO, dl, MVT::i32,
MVT::Flag, InFlag);
InFlag = SDValue(Lo,1);
SDNode *Hi = CurDAG->getTargetNode(Mips::MFHI, dl, EVT::i32, InFlag);
SDNode *Hi = CurDAG->getTargetNode(Mips::MFHI, dl, MVT::i32, InFlag);
if (!N.getValue(0).use_empty())
ReplaceUses(N.getValue(0), SDValue(Lo,0));
@ -292,14 +292,14 @@ Select(SDValue N)
unsigned MulOp = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT);
SDNode *MulNode = CurDAG->getTargetNode(MulOp, dl,
EVT::Flag, MulOp1, MulOp2);
MVT::Flag, MulOp1, MulOp2);
SDValue InFlag = SDValue(MulNode, 0);
if (MulOp == ISD::MUL)
return CurDAG->getTargetNode(Mips::MFLO, dl, EVT::i32, InFlag);
return CurDAG->getTargetNode(Mips::MFLO, dl, MVT::i32, InFlag);
else
return CurDAG->getTargetNode(Mips::MFHI, dl, EVT::i32, InFlag);
return CurDAG->getTargetNode(Mips::MFHI, dl, MVT::i32, InFlag);
}
/// Div/Rem operations
@ -318,10 +318,10 @@ Select(SDValue N)
Op = (Opcode == ISD::SREM ? Mips::DIV : Mips::DIVu);
MOp = Mips::MFHI;
}
SDNode *Node = CurDAG->getTargetNode(Op, dl, EVT::Flag, Op1, Op2);
SDNode *Node = CurDAG->getTargetNode(Op, dl, MVT::Flag, Op1, Op2);
SDValue InFlag = SDValue(Node, 0);
return CurDAG->getTargetNode(MOp, dl, EVT::i32, InFlag);
return CurDAG->getTargetNode(MOp, dl, MVT::i32, InFlag);
}
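
The MFLO/MFHI pairing above reflects how the MIPS divide instruction works: one DIV writes both results, quotient to LO and remainder to HI, so SDIV/UDIV read LO while SREM/UREM read HI. A tiny standalone model (not LLVM code):

#include <cassert>

struct HiLo { int lo; int hi; };                 // LO = quotient, HI = remainder

static HiLo mipsDiv(int a, int b) { return { a / b, a % b }; }

int main() {
  HiLo r = mipsDiv(17, 5);
  assert(r.lo == 3 && r.hi == 2);                // MFLO -> 3, MFHI -> 2
  return 0;
}
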
// Get target GOT address.
@ -337,19 +337,19 @@ Select(SDValue N)
//bool isCodeLarge = (TM.getCodeModel() == CodeModel::Large);
SDValue Chain = Node->getOperand(0);
SDValue Callee = Node->getOperand(1);
SDValue T9Reg = CurDAG->getRegister(Mips::T9, EVT::i32);
SDValue T9Reg = CurDAG->getRegister(Mips::T9, MVT::i32);
SDValue InFlag(0, 0);
if ( (isa<GlobalAddressSDNode>(Callee)) ||
(isa<ExternalSymbolSDNode>(Callee)) )
{
/// Direct call for global addresses and external symbols
SDValue GPReg = CurDAG->getRegister(Mips::GP, EVT::i32);
SDValue GPReg = CurDAG->getRegister(Mips::GP, MVT::i32);
// Use load to get GOT target
SDValue Ops[] = { Callee, GPReg, Chain };
SDValue Load = SDValue(CurDAG->getTargetNode(Mips::LW, dl, EVT::i32,
EVT::Other, Ops, 3), 0);
SDValue Load = SDValue(CurDAG->getTargetNode(Mips::LW, dl, MVT::i32,
MVT::Other, Ops, 3), 0);
Chain = Load.getValue(1);
// Call target must be on T9
@ -359,8 +359,8 @@ Select(SDValue N)
Chain = CurDAG->getCopyToReg(Chain, dl, T9Reg, Callee, InFlag);
// Emit Jump and Link Register
SDNode *ResNode = CurDAG->getTargetNode(Mips::JALR, dl, EVT::Other,
EVT::Flag, T9Reg, Chain);
SDNode *ResNode = CurDAG->getTargetNode(Mips::JALR, dl, MVT::Other,
MVT::Flag, T9Reg, Chain);
Chain = SDValue(ResNode, 0);
InFlag = SDValue(ResNode, 1);
ReplaceUses(SDValue(Node, 0), Chain);


@ -65,108 +65,108 @@ MipsTargetLowering(MipsTargetMachine &TM)
setUsesGlobalOffsetTable(true);
// Set up the register classes
addRegisterClass(EVT::i32, Mips::CPURegsRegisterClass);
addRegisterClass(EVT::f32, Mips::FGR32RegisterClass);
addRegisterClass(MVT::i32, Mips::CPURegsRegisterClass);
addRegisterClass(MVT::f32, Mips::FGR32RegisterClass);
// When dealing with single precision only, use libcalls
if (!Subtarget->isSingleFloat())
if (!Subtarget->isFP64bit())
addRegisterClass(EVT::f64, Mips::AFGR64RegisterClass);
addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);
// Legal fp constants
addLegalFPImmediate(APFloat(+0.0f));
// Load extended operations for i1 types must be promoted
setLoadExtAction(ISD::EXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
// MIPS doesn't have extending float->double load/store
setLoadExtAction(ISD::EXTLOAD, EVT::f32, Expand);
setTruncStoreAction(EVT::f64, EVT::f32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// Used by legalize types to correctly generate the setcc result.
// Without this, every float setcc comes with an AND/OR with the result,
// we don't want this, since the fpcmp result goes to a flag register,
// which is used implicitly by brcond and select operations.
AddPromotedToType(ISD::SETCC, EVT::i1, EVT::i32);
AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
// Mips Custom Operations
setOperationAction(ISD::GlobalAddress, EVT::i32, Custom);
setOperationAction(ISD::GlobalTLSAddress, EVT::i32, Custom);
setOperationAction(ISD::JumpTable, EVT::i32, Custom);
setOperationAction(ISD::ConstantPool, EVT::i32, Custom);
setOperationAction(ISD::SELECT, EVT::f32, Custom);
setOperationAction(ISD::SELECT, EVT::f64, Custom);
setOperationAction(ISD::SELECT, EVT::i32, Custom);
setOperationAction(ISD::SETCC, EVT::f32, Custom);
setOperationAction(ISD::SETCC, EVT::f64, Custom);
setOperationAction(ISD::BRCOND, EVT::Other, Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32, Custom);
setOperationAction(ISD::FP_TO_SINT, EVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
setOperationAction(ISD::JumpTable, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
setOperationAction(ISD::SELECT, MVT::f32, Custom);
setOperationAction(ISD::SELECT, MVT::f64, Custom);
setOperationAction(ISD::SELECT, MVT::i32, Custom);
setOperationAction(ISD::SETCC, MVT::f32, Custom);
setOperationAction(ISD::SETCC, MVT::f64, Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
// We custom lower AND/OR to handle the case where the DAG contains 'ands/ors'
// with operands coming from setcc fp comparisons. This is necessary since
// the result from these setcc nodes lives in a flag register (FCR31).
setOperationAction(ISD::AND, EVT::i32, Custom);
setOperationAction(ISD::OR, EVT::i32, Custom);
setOperationAction(ISD::AND, MVT::i32, Custom);
setOperationAction(ISD::OR, MVT::i32, Custom);
// Operations not directly supported by Mips.
setOperationAction(ISD::BR_JT, EVT::Other, Expand);
setOperationAction(ISD::BR_CC, EVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, EVT::Other, Expand);
setOperationAction(ISD::UINT_TO_FP, EVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, EVT::i32, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
setOperationAction(ISD::CTPOP, EVT::i32, Expand);
setOperationAction(ISD::CTTZ, EVT::i32, Expand);
setOperationAction(ISD::ROTL, EVT::i32, Expand);
setOperationAction(ISD::ROTR, EVT::i32, Expand);
setOperationAction(ISD::SHL_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, EVT::i32, Expand);
setOperationAction(ISD::FCOPYSIGN, EVT::f32, Expand);
setOperationAction(ISD::FCOPYSIGN, EVT::f64, Expand);
setOperationAction(ISD::FSIN, EVT::f32, Expand);
setOperationAction(ISD::FCOS, EVT::f32, Expand);
setOperationAction(ISD::FPOWI, EVT::f32, Expand);
setOperationAction(ISD::FPOW, EVT::f32, Expand);
setOperationAction(ISD::FLOG, EVT::f32, Expand);
setOperationAction(ISD::FLOG2, EVT::f32, Expand);
setOperationAction(ISD::FLOG10, EVT::f32, Expand);
setOperationAction(ISD::FEXP, EVT::f32, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
setOperationAction(ISD::ROTL, MVT::i32, Expand);
setOperationAction(ISD::ROTR, MVT::i32, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FSIN, MVT::f32, Expand);
setOperationAction(ISD::FCOS, MVT::f32, Expand);
setOperationAction(ISD::FPOWI, MVT::f32, Expand);
setOperationAction(ISD::FPOW, MVT::f32, Expand);
setOperationAction(ISD::FLOG, MVT::f32, Expand);
setOperationAction(ISD::FLOG2, MVT::f32, Expand);
setOperationAction(ISD::FLOG10, MVT::f32, Expand);
setOperationAction(ISD::FEXP, MVT::f32, Expand);
// We don't have line number support yet.
setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
// Use the default for now
setOperationAction(ISD::STACKSAVE, EVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, EVT::Other, Expand);
setOperationAction(ISD::MEMBARRIER, EVT::Other, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
if (Subtarget->isSingleFloat())
setOperationAction(ISD::SELECT_CC, EVT::f64, Expand);
setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
if (!Subtarget->hasSEInReg()) {
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i8, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i16, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
}
if (!Subtarget->hasBitCount())
setOperationAction(ISD::CTLZ, EVT::i32, Expand);
setOperationAction(ISD::CTLZ, MVT::i32, Expand);
if (!Subtarget->hasSwap())
setOperationAction(ISD::BSWAP, EVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
setStackPointerRegisterToSaveRestore(Mips::SP);
computeRegisterProperties();
}
EVT::SimpleValueType MipsTargetLowering::getSetCCResultType(EVT VT) const {
return EVT::i32;
MVT::SimpleValueType MipsTargetLowering::getSetCCResultType(EVT VT) const {
return MVT::i32;
}
/// getFunctionAlignment - Return the Log2 alignment of this function.
@ -358,22 +358,22 @@ LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG)
SDValue Src = Op.getOperand(0);
// Set the condition register
SDValue CondReg = DAG.getCopyFromReg(Chain, dl, CCReg, EVT::i32);
SDValue CondReg = DAG.getCopyFromReg(Chain, dl, CCReg, MVT::i32);
CondReg = DAG.getCopyToReg(Chain, dl, Mips::AT, CondReg);
CondReg = DAG.getCopyFromReg(CondReg, dl, Mips::AT, EVT::i32);
CondReg = DAG.getCopyFromReg(CondReg, dl, Mips::AT, MVT::i32);
SDValue Cst = DAG.getConstant(3, EVT::i32);
SDValue Or = DAG.getNode(ISD::OR, dl, EVT::i32, CondReg, Cst);
Cst = DAG.getConstant(2, EVT::i32);
SDValue Xor = DAG.getNode(ISD::XOR, dl, EVT::i32, Or, Cst);
SDValue Cst = DAG.getConstant(3, MVT::i32);
SDValue Or = DAG.getNode(ISD::OR, dl, MVT::i32, CondReg, Cst);
Cst = DAG.getConstant(2, MVT::i32);
SDValue Xor = DAG.getNode(ISD::XOR, dl, MVT::i32, Or, Cst);
SDValue InFlag(0, 0);
CondReg = DAG.getCopyToReg(Chain, dl, Mips::FCR31, Xor, InFlag);
// Emit the round instruction and bit convert to integer
SDValue Trunc = DAG.getNode(MipsISD::FPRound, dl, EVT::f32,
SDValue Trunc = DAG.getNode(MipsISD::FPRound, dl, MVT::f32,
Src, CondReg.getValue(1));
SDValue BitCvt = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32, Trunc);
SDValue BitCvt = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Trunc);
return BitCvt;
}
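
A note on the OR-with-3 / XOR-with-2 sequence above (an assumption about intent, since the patch only shows the node building): the MIPS FCSR keeps its rounding mode in the two low bits, and FP_TO_SINT wants round-toward-zero, encoded as 01. OR-ing with 3 sets both bits and XOR-ing with 2 then clears bit 1, so the field ends up as 01 no matter what it held. A standalone check:

#include <cassert>
#include <cstdint>

static uint32_t forceLowBitsTo01(uint32_t reg) {
  return (reg | 3u) ^ 2u;                        // same constants as the DAG nodes
}

int main() {
  for (uint32_t mode = 0; mode < 4; ++mode)      // every possible starting mode
    assert((forceLowBitsTo01(mode) & 3u) == 1u); // low two bits are always 01
  return 0;
}
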
@ -385,11 +385,11 @@ LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG)
DebugLoc dl = Op.getDebugLoc();
// Get a reference from Mips stack pointer
SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, Mips::SP, EVT::i32);
SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, Mips::SP, MVT::i32);
// Subtract the dynamic size from the actual stack size to
// obtain the new stack size.
SDValue Sub = DAG.getNode(ISD::SUB, dl, EVT::i32, StackPointer, Size);
SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
// The Sub result contains the new stack start address, so it
// must be placed in the stack pointer register.
@ -411,15 +411,15 @@ LowerANDOR(SDValue Op, SelectionDAG &DAG)
if (LHS.getOpcode() != MipsISD::FPCmp || RHS.getOpcode() != MipsISD::FPCmp)
return Op;
SDValue True = DAG.getConstant(1, EVT::i32);
SDValue False = DAG.getConstant(0, EVT::i32);
SDValue True = DAG.getConstant(1, MVT::i32);
SDValue False = DAG.getConstant(0, MVT::i32);
SDValue LSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
LHS, True, False, LHS.getOperand(2));
SDValue RSEL = DAG.getNode(MipsISD::FPSelectCC, dl, True.getValueType(),
RHS, True, False, RHS.getOperand(2));
return DAG.getNode(Op.getOpcode(), dl, EVT::i32, LSEL, RSEL);
return DAG.getNode(Op.getOpcode(), dl, MVT::i32, LSEL, RSEL);
}
SDValue MipsTargetLowering::
@ -438,7 +438,7 @@ LowerBRCOND(SDValue Op, SelectionDAG &DAG)
SDValue CCNode = CondRes.getOperand(2);
Mips::CondCode CC =
(Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), EVT::i32);
SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), MVT::i32);
return DAG.getNode(MipsISD::FPBrcond, dl, Op.getValueType(), Chain, BrCode,
Dest, CondRes);
@ -457,7 +457,7 @@ LowerSETCC(SDValue Op, SelectionDAG &DAG)
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
return DAG.getNode(MipsISD::FPCmp, dl, Op.getValueType(), LHS, RHS,
DAG.getConstant(FPCondCCodeToFCC(CC), EVT::i32));
DAG.getConstant(FPCondCCodeToFCC(CC), MVT::i32));
}
SDValue MipsTargetLowering::
@ -491,23 +491,23 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG)
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i32);
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
// %hi/%lo relocation
SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, EVT::i32, GA);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, EVT::i32, GA);
return DAG.getNode(ISD::ADD, dl, EVT::i32, HiPart, Lo);
SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, GA);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, GA);
return DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
} else { // Abicall relocations, TODO: make this cleaner.
SDValue ResNode = DAG.getLoad(EVT::i32, dl,
SDValue ResNode = DAG.getLoad(MVT::i32, dl,
DAG.getEntryNode(), GA, NULL, 0);
// For functions and global targets that are not internally linked, only
// a load from the GOT/GP is necessary for PIC to work.
if (!GV->hasLocalLinkage() || isa<Function>(GV))
return ResNode;
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, EVT::i32, GA);
return DAG.getNode(ISD::ADD, dl, EVT::i32, ResNode, Lo);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, GA);
return DAG.getNode(ISD::ADD, dl, MVT::i32, ResNode, Lo);
}
llvm_unreachable("Dont know how to handle GlobalAddress");
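
The non-PIC path above rebuilds an address from MipsISD::Hi and MipsISD::Lo halves. Assuming the usual MIPS %hi/%lo convention (the low part is a signed 16-bit immediate, so the high part carries a +1 adjustment whenever bit 15 is set), the split looks like this standalone sketch:

#include <cassert>
#include <cstdint>

static uint32_t hiPart(uint32_t addr) { return (addr + 0x8000u) >> 16; }
static int32_t  loPart(uint32_t addr) {
  return int32_t(addr & 0x7fff) - int32_t(addr & 0x8000);  // signed 16-bit part
}

int main() {
  const uint32_t samples[] = { 0x00400123u, 0x1000fff0u, 0x00408000u };
  for (uint32_t a : samples)                       // hi << 16 plus signed lo
    assert(uint32_t((hiPart(a) << 16) + loPart(a)) == a);
  return 0;
}
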
@ -534,14 +534,14 @@ LowerJumpTable(SDValue Op, SelectionDAG &DAG)
SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
if (getTargetMachine().getRelocationModel() != Reloc::PIC_) {
SDVTList VTs = DAG.getVTList(EVT::i32);
SDVTList VTs = DAG.getVTList(MVT::i32);
SDValue Ops[] = { JTI };
HiPart = DAG.getNode(MipsISD::Hi, dl, VTs, Ops, 1);
} else // Emit Load from Global Pointer
HiPart = DAG.getLoad(EVT::i32, dl, DAG.getEntryNode(), JTI, NULL, 0);
HiPart = DAG.getLoad(MVT::i32, dl, DAG.getEntryNode(), JTI, NULL, 0);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, EVT::i32, JTI);
ResNode = DAG.getNode(ISD::ADD, dl, EVT::i32, HiPart, Lo);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, JTI);
ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
return ResNode;
}
@ -552,7 +552,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG)
SDValue ResNode;
ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
Constant *C = N->getConstVal();
SDValue CP = DAG.getTargetConstantPool(C, EVT::i32, N->getAlignment());
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
// FIXME there isn't actually debug info here
DebugLoc dl = Op.getDebugLoc();
@ -562,13 +562,13 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG)
// hacking it. This feature should come soon so we can uncomment the
// stuff below.
//if (IsInSmallSection(C->getType())) {
// SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, EVT::i32, CP);
// SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(EVT::i32);
// ResNode = DAG.getNode(ISD::ADD, EVT::i32, GOT, GPRelNode);
// SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP);
// SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32);
// ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode);
//} else { // %hi/%lo relocation
SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, EVT::i32, CP);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, EVT::i32, CP);
ResNode = DAG.getNode(ISD::ADD, dl, EVT::i32, HiPart, Lo);
SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, CP);
SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CP);
ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo);
//}
return ResNode;
@ -614,8 +614,8 @@ static bool CC_MipsO32(unsigned ValNo, EVT ValVT,
bool IntRegUsed = (IntRegs[UnallocIntReg] != (unsigned (Mips::A0)));
// Promote i8 and i16
if (LocVT == EVT::i8 || LocVT == EVT::i16) {
LocVT = EVT::i32;
if (LocVT == MVT::i8 || LocVT == MVT::i16) {
LocVT = MVT::i32;
if (ArgFlags.isSExt())
LocInfo = CCValAssign::SExt;
else if (ArgFlags.isZExt())
@ -624,20 +624,20 @@ static bool CC_MipsO32(unsigned ValNo, EVT ValVT,
LocInfo = CCValAssign::AExt;
}
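
A standalone sketch of the promotion step above (the ArgFlags struct here is a stand-in, not LLVM's real flags type): i8 and i16 arguments are widened to i32, and the extension kind follows the signext/zeroext attributes, defaulting to any-extend.

#include <cassert>

enum class Ext { SExt, ZExt, AExt };
struct ArgFlags { bool isSExt; bool isZExt; };   // stand-in for the real flags

static Ext promoteSmallInt(ArgFlags f) {
  if (f.isSExt) return Ext::SExt;                // signext parameter attribute
  if (f.isZExt) return Ext::ZExt;                // zeroext parameter attribute
  return Ext::AExt;                              // otherwise upper bits are undefined
}

int main() {
  assert(promoteSmallInt({true, false}) == Ext::SExt);
  assert(promoteSmallInt({false, true}) == Ext::ZExt);
  assert(promoteSmallInt({false, false}) == Ext::AExt);
  return 0;
}
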
if (ValVT == EVT::i32 || (ValVT == EVT::f32 && IntRegUsed)) {
if (ValVT == MVT::i32 || (ValVT == MVT::f32 && IntRegUsed)) {
Reg = State.AllocateReg(IntRegs, IntRegsSize);
IntRegUsed = true;
LocVT = EVT::i32;
LocVT = MVT::i32;
}
if (ValVT.isFloatingPoint() && !IntRegUsed) {
if (ValVT == EVT::f32)
if (ValVT == MVT::f32)
Reg = State.AllocateReg(F32Regs, FloatRegsSize);
else
Reg = State.AllocateReg(F64Regs, FloatRegsSize);
}
if (ValVT == EVT::f64 && IntRegUsed) {
if (ValVT == MVT::f64 && IntRegUsed) {
if (UnallocIntReg != IntRegsSize) {
// If we hit register A3 as the first one not yet allocated, we must
// mark it as allocated (shadow) and use the stack instead.
@ -646,7 +646,7 @@ static bool CC_MipsO32(unsigned ValNo, EVT ValVT,
for (;UnallocIntReg < IntRegsSize; ++UnallocIntReg)
State.AllocateReg(UnallocIntReg);
}
LocVT = EVT::i32;
LocVT = MVT::i32;
}
if (!Reg) {
@ -686,7 +686,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// To meet O32 ABI, Mips must always allocate 16 bytes on
// the stack (even if less than 4 are used as arguments)
if (Subtarget->isABI_O32()) {
int VTsize = EVT(EVT::i32).getSizeInBits()/8;
int VTsize = EVT(MVT::i32).getSizeInBits()/8;
MFI->CreateFixedObject(VTsize, (VTsize*3));
CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
} else
@ -715,13 +715,13 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
default: llvm_unreachable("Unknown loc info!");
case CCValAssign::Full:
if (Subtarget->isABI_O32() && VA.isRegLoc()) {
if (VA.getValVT() == EVT::f32 && VA.getLocVT() == EVT::i32)
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32, Arg);
if (VA.getValVT() == EVT::f64 && VA.getLocVT() == EVT::i32) {
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i64, Arg);
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Arg,
if (VA.getValVT() == MVT::f32 && VA.getLocVT() == MVT::i32)
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Arg);
if (VA.getValVT() == MVT::f64 && VA.getLocVT() == MVT::i32) {
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, Arg);
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
DAG.getConstant(0, getPointerTy()));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Arg,
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Arg,
DAG.getConstant(1, getPointerTy()));
RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
RegsToPass.push_back(std::make_pair(VA.getLocReg()+1, Hi));
@ -768,7 +768,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because all store
// nodes are independent of each other.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token
@ -794,7 +794,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// = Chain, Callee, Reg#1, Reg#2, ...
//
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@ -838,9 +838,9 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Reload GP value.
FI = MipsFI->getGPFI();
SDValue FIN = DAG.getFrameIndex(FI,getPointerTy());
SDValue GPLoad = DAG.getLoad(EVT::i32, dl, Chain, FIN, NULL, 0);
SDValue GPLoad = DAG.getLoad(MVT::i32, dl, Chain, FIN, NULL, 0);
Chain = GPLoad.getValue(1);
Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, EVT::i32),
Chain = DAG.getCopyToReg(Chain, dl, DAG.getRegister(Mips::GP, MVT::i32),
GPLoad, SDValue(0,0));
InFlag = Chain.getValue(1);
}
@ -922,11 +922,11 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
EVT RegVT = VA.getLocVT();
TargetRegisterClass *RC = 0;
if (RegVT == EVT::i32)
if (RegVT == MVT::i32)
RC = Mips::CPURegsRegisterClass;
else if (RegVT == EVT::f32)
else if (RegVT == MVT::f32)
RC = Mips::FGR32RegisterClass;
else if (RegVT == EVT::f64) {
else if (RegVT == MVT::f64) {
if (!Subtarget->isSingleFloat())
RC = Mips::AFGR64RegisterClass;
} else
@ -954,15 +954,15 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
// Handle O32 ABI cases: i32->f32 and (i32,i32)->f64
if (Subtarget->isABI_O32()) {
if (RegVT == EVT::i32 && VA.getValVT() == EVT::f32)
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, ArgValue);
if (RegVT == EVT::i32 && VA.getValVT() == EVT::f64) {
if (RegVT == MVT::i32 && VA.getValVT() == MVT::f32)
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
if (RegVT == MVT::i32 && VA.getValVT() == MVT::f64) {
unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(),
VA.getLocReg()+1, RC);
SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT);
SDValue Hi = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, ArgValue);
SDValue Lo = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, ArgValue2);
ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, EVT::f64, Lo, Hi);
SDValue Hi = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
SDValue Lo = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue2);
ArgValue = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::f64, Lo, Hi);
}
}
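
The BIT_CONVERT/BUILD_PAIR sequence above reassembles an f64 that arrived in two i32 argument registers. A standalone sketch of the value-level packing (plain C++; which physical register holds which half depends on the target's endianness):

#include <cassert>
#include <cstdint>
#include <cstring>

static double pairToDouble(uint32_t lo, uint32_t hi) {
  uint64_t bits = (uint64_t(hi) << 32) | lo;     // glue the two 32-bit halves
  double d;
  std::memcpy(&d, &bits, sizeof d);              // bit-for-bit reinterpretation
  return d;
}

int main() {
  // 1.0 has the IEEE-754 bit pattern 0x3FF0000000000000.
  assert(pairToDouble(0x00000000u, 0x3ff00000u) == 1.0);
  return 0;
}
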
@ -1021,11 +1021,11 @@ MipsTargetLowering::LowerFormalArguments(SDValue Chain,
if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) {
unsigned Reg = MipsFI->getSRetReturnReg();
if (!Reg) {
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(EVT::i32));
Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32));
MipsFI->setSRetReturnReg(Reg);
}
SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Copy, Chain);
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
}
return Chain;
@ -1094,11 +1094,11 @@ MipsTargetLowering::LowerReturn(SDValue Chain,
// Return on Mips is always a "jr $ra"
if (Flag.getNode())
return DAG.getNode(MipsISD::Ret, dl, EVT::Other,
Chain, DAG.getRegister(Mips::RA, EVT::i32), Flag);
return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
Chain, DAG.getRegister(Mips::RA, MVT::i32), Flag);
else // Return Void
return DAG.getNode(MipsISD::Ret, dl, EVT::Other,
Chain, DAG.getRegister(Mips::RA, EVT::i32));
return DAG.getNode(MipsISD::Ret, dl, MVT::Other,
Chain, DAG.getRegister(Mips::RA, MVT::i32));
}
//===----------------------------------------------------------------------===//
@ -1142,9 +1142,9 @@ getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
case 'r':
return std::make_pair(0U, Mips::CPURegsRegisterClass);
case 'f':
if (VT == EVT::f32)
if (VT == MVT::f32)
return std::make_pair(0U, Mips::FGR32RegisterClass);
if (VT == EVT::f64)
if (VT == MVT::f64)
if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
return std::make_pair(0U, Mips::AFGR64RegisterClass);
}
@ -1174,7 +1174,7 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
Mips::T8, 0);
case 'f':
if (VT == EVT::f32) {
if (VT == MVT::f32) {
if (Subtarget->isSingleFloat())
return make_vector<unsigned>(Mips::F2, Mips::F3, Mips::F4, Mips::F5,
Mips::F6, Mips::F7, Mips::F8, Mips::F9, Mips::F10, Mips::F11,
@ -1187,7 +1187,7 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
Mips::F28, Mips::F30, 0);
}
if (VT == EVT::f64)
if (VT == MVT::f64)
if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
return make_vector<unsigned>(Mips::D1, Mips::D2, Mips::D3, Mips::D4,
Mips::D5, Mips::D10, Mips::D11, Mips::D12, Mips::D13,


@ -80,7 +80,7 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - get the ISD::SETCC result ValueType
EVT::SimpleValueType getSetCCResultType(EVT VT) const;
MVT::SimpleValueType getSetCCResultType(EVT VT) const;
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *F) const;


@ -96,7 +96,7 @@ def HI16 : SDNodeXForm<imm, [{
// Node immediate fits as 16-bit sign extended on target immediate.
// e.g. addi, andi
def immSExt16 : PatLeaf<(imm), [{
if (N->getValueType(0) == EVT::i32)
if (N->getValueType(0) == MVT::i32)
return (int32_t)N->getZExtValue() == (short)N->getZExtValue();
else
return (int64_t)N->getZExtValue() == (short)N->getZExtValue();
@ -107,7 +107,7 @@ def immSExt16 : PatLeaf<(imm), [{
// immediate are caught.
// e.g. addiu, sltiu
def immZExt16 : PatLeaf<(imm), [{
if (N->getValueType(0) == EVT::i32)
if (N->getValueType(0) == MVT::i32)
return (uint32_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
else
return (uint64_t)N->getZExtValue() == (unsigned short)N->getZExtValue();


@ -147,9 +147,9 @@ PIC16TargetLowering::PIC16TargetLowering(PIC16TargetMachine &TM)
Subtarget = &TM.getSubtarget<PIC16Subtarget>();
addRegisterClass(EVT::i8, PIC16::GPRRegisterClass);
addRegisterClass(MVT::i8, PIC16::GPRRegisterClass);
setShiftAmountType(EVT::i8);
setShiftAmountType(MVT::i8);
// Std lib call names
setLibcallName(RTLIB::COS_F32, getStdLibCallName(RTLIB::COS_F32));
@ -243,65 +243,65 @@ PIC16TargetLowering::PIC16TargetLowering(PIC16TargetMachine &TM)
setCmpLibcallCC(RTLIB::UO_F32, ISD::SETNE);
setCmpLibcallCC(RTLIB::O_F32, ISD::SETEQ);
setOperationAction(ISD::GlobalAddress, EVT::i16, Custom);
setOperationAction(ISD::ExternalSymbol, EVT::i16, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
setOperationAction(ISD::ExternalSymbol, MVT::i16, Custom);
setOperationAction(ISD::LOAD, EVT::i8, Legal);
setOperationAction(ISD::LOAD, EVT::i16, Custom);
setOperationAction(ISD::LOAD, EVT::i32, Custom);
setOperationAction(ISD::LOAD, MVT::i8, Legal);
setOperationAction(ISD::LOAD, MVT::i16, Custom);
setOperationAction(ISD::LOAD, MVT::i32, Custom);
setOperationAction(ISD::STORE, EVT::i8, Legal);
setOperationAction(ISD::STORE, EVT::i16, Custom);
setOperationAction(ISD::STORE, EVT::i32, Custom);
setOperationAction(ISD::STORE, EVT::i64, Custom);
setOperationAction(ISD::STORE, MVT::i8, Legal);
setOperationAction(ISD::STORE, MVT::i16, Custom);
setOperationAction(ISD::STORE, MVT::i32, Custom);
setOperationAction(ISD::STORE, MVT::i64, Custom);
setOperationAction(ISD::ADDE, EVT::i8, Custom);
setOperationAction(ISD::ADDC, EVT::i8, Custom);
setOperationAction(ISD::SUBE, EVT::i8, Custom);
setOperationAction(ISD::SUBC, EVT::i8, Custom);
setOperationAction(ISD::SUB, EVT::i8, Custom);
setOperationAction(ISD::ADD, EVT::i8, Custom);
setOperationAction(ISD::ADD, EVT::i16, Custom);
setOperationAction(ISD::ADDE, MVT::i8, Custom);
setOperationAction(ISD::ADDC, MVT::i8, Custom);
setOperationAction(ISD::SUBE, MVT::i8, Custom);
setOperationAction(ISD::SUBC, MVT::i8, Custom);
setOperationAction(ISD::SUB, MVT::i8, Custom);
setOperationAction(ISD::ADD, MVT::i8, Custom);
setOperationAction(ISD::ADD, MVT::i16, Custom);
setOperationAction(ISD::OR, EVT::i8, Custom);
setOperationAction(ISD::AND, EVT::i8, Custom);
setOperationAction(ISD::XOR, EVT::i8, Custom);
setOperationAction(ISD::OR, MVT::i8, Custom);
setOperationAction(ISD::AND, MVT::i8, Custom);
setOperationAction(ISD::XOR, MVT::i8, Custom);
setOperationAction(ISD::FrameIndex, EVT::i16, Custom);
setOperationAction(ISD::FrameIndex, MVT::i16, Custom);
setOperationAction(ISD::MUL, EVT::i8, Custom);
setOperationAction(ISD::MUL, MVT::i8, Custom);
setOperationAction(ISD::SMUL_LOHI, EVT::i8, Expand);
setOperationAction(ISD::UMUL_LOHI, EVT::i8, Expand);
setOperationAction(ISD::MULHU, EVT::i8, Expand);
setOperationAction(ISD::MULHS, EVT::i8, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i8, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i8, Expand);
setOperationAction(ISD::MULHU, MVT::i8, Expand);
setOperationAction(ISD::MULHS, MVT::i8, Expand);
setOperationAction(ISD::SRA, EVT::i8, Custom);
setOperationAction(ISD::SHL, EVT::i8, Custom);
setOperationAction(ISD::SRL, EVT::i8, Custom);
setOperationAction(ISD::SRA, MVT::i8, Custom);
setOperationAction(ISD::SHL, MVT::i8, Custom);
setOperationAction(ISD::SRL, MVT::i8, Custom);
setOperationAction(ISD::ROTL, EVT::i8, Expand);
setOperationAction(ISD::ROTR, EVT::i8, Expand);
setOperationAction(ISD::ROTL, MVT::i8, Expand);
setOperationAction(ISD::ROTR, MVT::i8, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
// PIC16 does not support shift parts
setOperationAction(ISD::SRA_PARTS, EVT::i8, Expand);
setOperationAction(ISD::SHL_PARTS, EVT::i8, Expand);
setOperationAction(ISD::SRL_PARTS, EVT::i8, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i8, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i8, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i8, Expand);
// PIC16 does not have a SETCC, expand it to SELECT_CC.
setOperationAction(ISD::SETCC, EVT::i8, Expand);
setOperationAction(ISD::SELECT, EVT::i8, Expand);
setOperationAction(ISD::BRCOND, EVT::Other, Expand);
setOperationAction(ISD::BRIND, EVT::Other, Expand);
setOperationAction(ISD::SETCC, MVT::i8, Expand);
setOperationAction(ISD::SELECT, MVT::i8, Expand);
setOperationAction(ISD::BRCOND, MVT::Other, Expand);
setOperationAction(ISD::BRIND, MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, EVT::i8, Custom);
setOperationAction(ISD::BR_CC, EVT::i8, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i8, Custom);
setOperationAction(ISD::BR_CC, MVT::i8, Custom);
//setOperationAction(ISD::TRUNCATE, EVT::i16, Custom);
setTruncStoreAction(EVT::i16, EVT::i8, Custom);
//setOperationAction(ISD::TRUNCATE, MVT::i16, Custom);
setTruncStoreAction(MVT::i16, MVT::i8, Custom);
// Now deduce the information based on the above mentioned
// actions
@ -313,7 +313,7 @@ static SDValue getOutFlag(SDValue &Op) {
// Flag is the last value of the node.
SDValue Flag = Op.getValue(Op.getNode()->getNumValues() - 1);
assert (Flag.getValueType() == EVT::Flag
assert (Flag.getValueType() == MVT::Flag
&& "Node does not have an out Flag");
return Flag;
@ -340,12 +340,12 @@ static SDValue getChain(SDValue &Op) {
// If the last value returned is a Flag then the chain is the
// second-to-last value returned.
if (Chain.getValueType() == EVT::Flag)
if (Chain.getValueType() == MVT::Flag)
Chain = Op.getValue(Op.getNode()->getNumValues() - 2);
// Not all nodes produce a chain. Therefore the following assert
// verifies that the node is returning a chain only.
assert (Chain.getValueType() == EVT::Other
assert (Chain.getValueType() == MVT::Other
&& "Node does not have a chain");
return Chain;
@ -365,9 +365,9 @@ static void PopulateResults(SDValue N, SmallVectorImpl<SDValue>&Results) {
Results.push_back(N);
}
EVT::SimpleValueType
MVT::SimpleValueType
PIC16TargetLowering::getSetCCResultType(EVT ValType) const {
return EVT::i8;
return MVT::i8;
}
/// The type legalizer framework of generating legalizer can generate libcalls
@ -405,7 +405,7 @@ PIC16TargetLowering::MakePIC16Libcall(PIC16ISD::PIC16Libcall Call,
Args.push_back(Entry);
}
SDValue Callee = DAG.getExternalSymbol(getPIC16LibcallName(Call), EVT::i16);
SDValue Callee = DAG.getExternalSymbol(getPIC16LibcallName(Call), MVT::i16);
const Type *RetTy = RetVT.getTypeForEVT();
std::pair<SDValue,SDValue> CallInfo =
@ -478,11 +478,11 @@ void PIC16TargetLowering::ReplaceNodeResults(SDNode *N,
SDValue PIC16TargetLowering::ExpandFrameIndex(SDNode *N, SelectionDAG &DAG) {
// Currently handling FrameIndex of size EVT::i16 only
// Currently handling FrameIndex of size MVT::i16 only
// One example of this scenario is when the return value is written to
// FrameIndex#0
if (N->getValueType(0) != EVT::i16)
if (N->getValueType(0) != MVT::i16)
return SDValue();
// Expand the FrameIndex into ExternalSymbol and a Constant node
@ -504,9 +504,9 @@ SDValue PIC16TargetLowering::ExpandFrameIndex(SDNode *N, SelectionDAG &DAG) {
int FrameOffset;
SDValue FI = SDValue(N,0);
LegalizeFrameIndex(FI, DAG, ES, FrameOffset);
SDValue Offset = DAG.getConstant(FrameOffset, EVT::i8);
SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, EVT::i8, ES, Offset);
SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, EVT::i8, ES, Offset);
SDValue Offset = DAG.getConstant(FrameOffset, MVT::i8);
SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, MVT::i8, ES, Offset);
SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, MVT::i8, ES, Offset);
return DAG.getNode(ISD::BUILD_PAIR, dl, N->getValueType(0), Lo, Hi);
}
@ -523,12 +523,12 @@ SDValue PIC16TargetLowering::ExpandStore(SDNode *N, SelectionDAG &DAG) {
SDValue PtrLo, PtrHi;
LegalizeAddress(Ptr, DAG, PtrLo, PtrHi, StoreOffset, dl);
if (ValueType == EVT::i8) {
return DAG.getNode (PIC16ISD::PIC16Store, dl, EVT::Other, Chain, Src,
if (ValueType == MVT::i8) {
return DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other, Chain, Src,
PtrLo, PtrHi,
DAG.getConstant (0 + StoreOffset, EVT::i8));
DAG.getConstant (0 + StoreOffset, MVT::i8));
}
else if (ValueType == EVT::i16) {
else if (ValueType == MVT::i16) {
// Get the Lo and Hi parts from MERGE_VALUE or BUILD_PAIR.
SDValue SrcLo, SrcHi;
GetExpandedParts(Src, DAG, SrcLo, SrcHi);
@ -537,19 +537,19 @@ SDValue PIC16TargetLowering::ExpandStore(SDNode *N, SelectionDAG &DAG) {
ChainLo = Chain.getOperand(0);
ChainHi = Chain.getOperand(1);
}
SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other,
SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other,
ChainLo,
SrcLo, PtrLo, PtrHi,
DAG.getConstant (0 + StoreOffset, EVT::i8));
DAG.getConstant (0 + StoreOffset, MVT::i8));
SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other, ChainHi,
SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainHi,
SrcHi, PtrLo, PtrHi,
DAG.getConstant (1 + StoreOffset, EVT::i8));
DAG.getConstant (1 + StoreOffset, MVT::i8));
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, getChain(Store1),
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, getChain(Store1),
getChain(Store2));
}
else if (ValueType == EVT::i32) {
else if (ValueType == MVT::i32) {
// Get the Lo and Hi parts from MERGE_VALUE or BUILD_PAIR.
SDValue SrcLo, SrcHi;
GetExpandedParts(Src, DAG, SrcLo, SrcHi);
@ -574,30 +574,30 @@ SDValue PIC16TargetLowering::ExpandStore(SDNode *N, SelectionDAG &DAG) {
ChainHi1 = ChainHi.getOperand(0);
ChainHi2 = ChainHi.getOperand(1);
}
SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other,
SDValue Store1 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other,
ChainLo1,
SrcLo1, PtrLo, PtrHi,
DAG.getConstant (0 + StoreOffset, EVT::i8));
DAG.getConstant (0 + StoreOffset, MVT::i8));
SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other, ChainLo2,
SDValue Store2 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainLo2,
SrcLo2, PtrLo, PtrHi,
DAG.getConstant (1 + StoreOffset, EVT::i8));
DAG.getConstant (1 + StoreOffset, MVT::i8));
SDValue Store3 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other, ChainHi1,
SDValue Store3 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainHi1,
SrcHi1, PtrLo, PtrHi,
DAG.getConstant (2 + StoreOffset, EVT::i8));
DAG.getConstant (2 + StoreOffset, MVT::i8));
SDValue Store4 = DAG.getNode(PIC16ISD::PIC16Store, dl, EVT::Other, ChainHi2,
SDValue Store4 = DAG.getNode(PIC16ISD::PIC16Store, dl, MVT::Other, ChainHi2,
SrcHi2, PtrLo, PtrHi,
DAG.getConstant (3 + StoreOffset, EVT::i8));
DAG.getConstant (3 + StoreOffset, MVT::i8));
SDValue RetLo = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
SDValue RetLo = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
getChain(Store1), getChain(Store2));
SDValue RetHi = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
SDValue RetHi = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
getChain(Store3), getChain(Store4));
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, RetLo, RetHi);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, RetLo, RetHi);
} else if (ValueType == EVT::i64) {
} else if (ValueType == MVT::i64) {
SDValue SrcLo, SrcHi;
GetExpandedParts(Src, DAG, SrcLo, SrcHi);
SDValue ChainLo = Chain, ChainHi = Chain;
@ -613,7 +613,7 @@ SDValue PIC16TargetLowering::ExpandStore(SDNode *N, SelectionDAG &DAG) {
SDValue Store2 = DAG.getStore(ChainHi, dl, SrcHi, Ptr, NULL,
1 + StoreOffset);
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Store1,
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1,
Store2);
} else {
assert (0 && "value type not supported");
@ -627,12 +627,12 @@ SDValue PIC16TargetLowering::ExpandExternalSymbol(SDNode *N, SelectionDAG &DAG)
// FIXME there isn't really debug info here
DebugLoc dl = ES->getDebugLoc();
SDValue TES = DAG.getTargetExternalSymbol(ES->getSymbol(), EVT::i8);
SDValue Offset = DAG.getConstant(0, EVT::i8);
SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, EVT::i8, TES, Offset);
SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, EVT::i8, TES, Offset);
SDValue TES = DAG.getTargetExternalSymbol(ES->getSymbol(), MVT::i8);
SDValue Offset = DAG.getConstant(0, MVT::i8);
SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, MVT::i8, TES, Offset);
SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, MVT::i8, TES, Offset);
return DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i16, Lo, Hi);
return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16, Lo, Hi);
}
// ExpandGlobalAddress -
@ -641,14 +641,14 @@ SDValue PIC16TargetLowering::ExpandGlobalAddress(SDNode *N, SelectionDAG &DAG) {
// FIXME there isn't really debug info here
DebugLoc dl = G->getDebugLoc();
SDValue TGA = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i8,
SDValue TGA = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i8,
G->getOffset());
SDValue Offset = DAG.getConstant(0, EVT::i8);
SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, EVT::i8, TGA, Offset);
SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, EVT::i8, TGA, Offset);
SDValue Offset = DAG.getConstant(0, MVT::i8);
SDValue Lo = DAG.getNode(PIC16ISD::Lo, dl, MVT::i8, TGA, Offset);
SDValue Hi = DAG.getNode(PIC16ISD::Hi, dl, MVT::i8, TGA, Offset);
return DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i16, Lo, Hi);
return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16, Lo, Hi);
}
bool PIC16TargetLowering::isDirectAddress(const SDValue &Op) {
@ -695,11 +695,11 @@ void PIC16TargetLowering::GetExpandedParts(SDValue Op, SelectionDAG &DAG,
// Extract the lo component.
Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NewVT, Op,
DAG.getConstant(0, EVT::i8));
DAG.getConstant(0, MVT::i8));
// extract the hi component
Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, NewVT, Op,
DAG.getConstant(1, EVT::i8));
DAG.getConstant(1, MVT::i8));
}
// Legalize FrameIndex into ExternalSymbol and offset.
@ -723,7 +723,7 @@ PIC16TargetLowering::LegalizeFrameIndex(SDValue Op, SelectionDAG &DAG,
const char *tmpName;
if (FIndex < ReservedFrameCount) {
tmpName = createESName(PAN::getFrameLabel(Name));
ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
Offset = 0;
for (unsigned i=0; i<FIndex ; ++i) {
Offset += MFI->getObjectSize(i);
@ -731,7 +731,7 @@ PIC16TargetLowering::LegalizeFrameIndex(SDValue Op, SelectionDAG &DAG,
} else {
// FrameIndex has been made for some temporary storage
tmpName = createESName(PAN::getTempdataLabel(Name));
ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
Offset = GetTmpOffsetForFI(FIndex, MFI->getObjectSize(FIndex));
}
@ -777,11 +777,11 @@ void PIC16TargetLowering::LegalizeAddress(SDValue Ptr, SelectionDAG &DAG,
// then treat it as direct address.
// One example for such case is storing and loading
// from function frame during a call
if (Ptr.getValueType() == EVT::i8) {
if (Ptr.getValueType() == MVT::i8) {
switch (Ptr.getOpcode()) {
case ISD::TargetExternalSymbol:
Lo = Ptr;
Hi = DAG.getConstant(1, EVT::i8);
Hi = DAG.getConstant(1, MVT::i8);
return;
}
}
@ -792,14 +792,14 @@ void PIC16TargetLowering::LegalizeAddress(SDValue Ptr, SelectionDAG &DAG,
int FrameOffset;
if (TFI.getOpcode() == ISD::TargetFrameIndex) {
LegalizeFrameIndex(TFI, DAG, Lo, FrameOffset);
Hi = DAG.getConstant(1, EVT::i8);
Hi = DAG.getConstant(1, MVT::i8);
Offset += FrameOffset;
return;
} else if (TFI.getOpcode() == ISD::TargetExternalSymbol) {
// FrameIndex has already been expanded.
// Now just make use of its expansion
Lo = TFI;
Hi = DAG.getConstant(1, EVT::i8);
Hi = DAG.getConstant(1, MVT::i8);
SDValue FOffset = Ptr.getOperand(0).getOperand(1);
assert (FOffset.getOpcode() == ISD::Constant &&
"Invalid operand of PIC16ISD::Lo");
@ -817,7 +817,7 @@ void PIC16TargetLowering::LegalizeAddress(SDValue Ptr, SelectionDAG &DAG,
// signifies that banksel needs to generated for it. Value 0 for
// the constant signifies that banksel does not need to be generated
// for it. Mark it as 1 now and optimize later.
Hi = DAG.getConstant(1, EVT::i8);
Hi = DAG.getConstant(1, MVT::i8);
return;
}
@ -825,8 +825,8 @@ void PIC16TargetLowering::LegalizeAddress(SDValue Ptr, SelectionDAG &DAG,
GetExpandedParts(Ptr, DAG, Lo, Hi);
// Put the hi and lo parts into FSR.
Lo = DAG.getNode(PIC16ISD::MTLO, dl, EVT::i8, Lo);
Hi = DAG.getNode(PIC16ISD::MTHI, dl, EVT::i8, Hi);
Lo = DAG.getNode(PIC16ISD::MTLO, dl, MVT::i8, Lo);
Hi = DAG.getNode(PIC16ISD::MTHI, dl, MVT::i8, Hi);
return;
}
@ -856,8 +856,8 @@ SDValue PIC16TargetLowering::ExpandLoad(SDNode *N, SelectionDAG &DAG) {
if(ISD::isNON_EXTLoad(N)) {
for (iter=0; iter<NumLoads ; ++iter) {
// Add the pointer offset if any
Offset = DAG.getConstant(iter + LoadOffset, EVT::i8);
Tys = DAG.getVTList(EVT::i8, EVT::Other);
Offset = DAG.getConstant(iter + LoadOffset, MVT::i8);
Tys = DAG.getVTList(MVT::i8, MVT::Other);
Load = DAG.getNode(PIC16ISD::PIC16Load, dl, Tys, Chain, PtrLo, PtrHi,
Offset);
PICLoads.push_back(Load);
@ -872,18 +872,18 @@ SDValue PIC16TargetLowering::ExpandLoad(SDNode *N, SelectionDAG &DAG) {
// i.e. without any extension
EVT MemVT = LD->getMemoryVT();
unsigned MemBytes = MemVT.getSizeInBits() / 8;
// if EVT::i1 is extended to EVT::i8 then MemBytes will be zero
// if MVT::i1 is extended to MVT::i8 then MemBytes will be zero
// So set it to one
if (MemBytes == 0) MemBytes = 1;
unsigned ExtdBytes = VT.getSizeInBits() / 8;
Offset = DAG.getConstant(LoadOffset, EVT::i8);
Offset = DAG.getConstant(LoadOffset, MVT::i8);
Tys = DAG.getVTList(EVT::i8, EVT::Other);
Tys = DAG.getVTList(MVT::i8, MVT::Other);
// For MemBytes generate PIC16Load with proper offset
for (iter=0; iter < MemBytes; ++iter) {
// Add the pointer offset if any
Offset = DAG.getConstant(iter + LoadOffset, EVT::i8);
Offset = DAG.getConstant(iter + LoadOffset, MVT::i8);
Load = DAG.getNode(PIC16ISD::PIC16Load, dl, Tys, Chain, PtrLo, PtrHi,
Offset);
PICLoads.push_back(Load);
@ -893,15 +893,15 @@ SDValue PIC16TargetLowering::ExpandLoad(SDNode *N, SelectionDAG &DAG) {
if (ISD::isSEXTLoad(N)) {
// For all ExtdBytes use the Right Shifted(Arithmetic) Value of the
// highest MemByte
SDValue SRA = DAG.getNode(ISD::SRA, dl, EVT::i8, Load,
DAG.getConstant(7, EVT::i8));
SDValue SRA = DAG.getNode(ISD::SRA, dl, MVT::i8, Load,
DAG.getConstant(7, MVT::i8));
for (iter=MemBytes; iter<ExtdBytes; ++iter) {
PICLoads.push_back(SRA);
}
} else if (ISD::isZEXTLoad(N) || ISD::isEXTLoad(N)) {
//} else if (ISD::isZEXTLoad(N)) {
// ZeroExtendedLoad -- For all ExtdBytes use constant 0
SDValue ConstZero = DAG.getConstant(0, EVT::i8);
SDValue ConstZero = DAG.getConstant(0, MVT::i8);
for (iter=MemBytes; iter<ExtdBytes; ++iter) {
PICLoads.push_back(ConstZero);
}
@ -909,46 +909,46 @@ SDValue PIC16TargetLowering::ExpandLoad(SDNode *N, SelectionDAG &DAG) {
}
SDValue BP;
if (VT == EVT::i8) {
if (VT == MVT::i8) {
// Operand of Load is illegal -- Load itself is legal
return PICLoads[0];
}
else if (VT == EVT::i16) {
else if (VT == MVT::i16) {
BP = DAG.getNode(ISD::BUILD_PAIR, dl, VT, PICLoads[0], PICLoads[1]);
if (MemVT == EVT::i8)
if (MemVT == MVT::i8)
Chain = getChain(PICLoads[0]);
else
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
getChain(PICLoads[0]), getChain(PICLoads[1]));
} else if (VT == EVT::i32) {
} else if (VT == MVT::i32) {
SDValue BPs[2];
BPs[0] = DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i16,
BPs[0] = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16,
PICLoads[0], PICLoads[1]);
BPs[1] = DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i16,
BPs[1] = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i16,
PICLoads[2], PICLoads[3]);
BP = DAG.getNode(ISD::BUILD_PAIR, dl, VT, BPs[0], BPs[1]);
if (MemVT == EVT::i8)
if (MemVT == MVT::i8)
Chain = getChain(PICLoads[0]);
else if (MemVT == EVT::i16)
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
else if (MemVT == MVT::i16)
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
getChain(PICLoads[0]), getChain(PICLoads[1]));
else {
SDValue Chains[2];
Chains[0] = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chains[0] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
getChain(PICLoads[0]), getChain(PICLoads[1]));
Chains[1] = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chains[1] = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
getChain(PICLoads[2]), getChain(PICLoads[3]));
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Chains[0], Chains[1]);
}
}
Tys = DAG.getVTList(VT, EVT::Other);
Tys = DAG.getVTList(VT, MVT::Other);
return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, BP, Chain);
}
SDValue PIC16TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
// We should have handled larger operands in type legalizer itself.
assert (Op.getValueType() == EVT::i8 && "illegal shift to lower");
assert (Op.getValueType() == MVT::i8 && "illegal shift to lower");
SDNode *N = Op.getNode();
SDValue Value = N->getOperand(0);
@ -978,7 +978,7 @@ SDValue PIC16TargetLowering::LowerShift(SDValue Op, SelectionDAG &DAG) {
SDValue PIC16TargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) {
// We should have handled larger operands in type legalizer itself.
assert (Op.getValueType() == EVT::i8 && "illegal multiply to lower");
assert (Op.getValueType() == MVT::i8 && "illegal multiply to lower");
SDNode *N = Op.getNode();
SmallVector<SDValue, 2> Ops(2);
@ -1051,7 +1051,7 @@ SDValue PIC16TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
SDValue PIC16TargetLowering::ConvertToMemOperand(SDValue Op,
SelectionDAG &DAG,
DebugLoc dl) {
assert (Op.getValueType() == EVT::i8
assert (Op.getValueType() == MVT::i8
&& "illegal value type to store on stack.");
MachineFunction &MF = DAG.getMachineFunction();
@ -1063,22 +1063,22 @@ SDValue PIC16TargetLowering::ConvertToMemOperand(SDValue Op,
// Get a stack slot index and convert to es.
int FI = MF.getFrameInfo()->CreateStackObject(1, 1);
const char *tmpName = createESName(PAN::getTempdataLabel(FuncName));
SDValue ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
// Store the value to ES.
SDValue Store = DAG.getNode (PIC16ISD::PIC16Store, dl, EVT::Other,
SDValue Store = DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other,
DAG.getEntryNode(),
Op, ES,
DAG.getConstant (1, EVT::i8), // Banksel.
DAG.getConstant (1, MVT::i8), // Banksel.
DAG.getConstant (GetTmpOffsetForFI(FI, 1),
EVT::i8));
MVT::i8));
// Load the value from ES.
SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Other);
SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other);
SDValue Load = DAG.getNode(PIC16ISD::PIC16Load, dl, Tys, Store,
ES, DAG.getConstant (1, EVT::i8),
ES, DAG.getConstant (1, MVT::i8),
DAG.getConstant (GetTmpOffsetForFI(FI, 1),
EVT::i8));
MVT::i8));
return Load.getValue(0);
}
@ -1096,7 +1096,7 @@ LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
return Chain;
std::vector<SDValue> Ops;
SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
SDValue Arg, StoreRet;
// For PIC16 ABI the arguments come after the return value.
@ -1110,7 +1110,7 @@ LowerIndirectCallArguments(SDValue Chain, SDValue InFlag,
Ops.push_back(Arg);
Ops.push_back(DataAddr_Lo);
Ops.push_back(DataAddr_Hi);
Ops.push_back(DAG.getConstant(ArgOffset, EVT::i8));
Ops.push_back(DAG.getConstant(ArgOffset, MVT::i8));
Ops.push_back(InFlag);
StoreRet = DAG.getNode (PIC16ISD::PIC16StWF, dl, Tys, &Ops[0], Ops.size());
@ -1147,7 +1147,7 @@ LowerDirectCallArguments(SDValue ArgLabel, SDValue Chain, SDValue InFlag,
SDValue StoreRet;
std::vector<SDValue> Ops;
SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
for (unsigned i=0, Offset = 0; i<NumOps; i++) {
// Get the argument
Arg = Outs[i].Val;
@ -1160,7 +1160,7 @@ LowerDirectCallArguments(SDValue ArgLabel, SDValue Chain, SDValue InFlag,
Ops.push_back(Arg);
Ops.push_back(PtrLo);
Ops.push_back(PtrHi);
Ops.push_back(DAG.getConstant(StoreOffset, EVT::i8));
Ops.push_back(DAG.getConstant(StoreOffset, MVT::i8));
Ops.push_back(InFlag);
StoreRet = DAG.getNode (PIC16ISD::PIC16StWF, dl, Tys, &Ops[0], Ops.size());
@ -1193,10 +1193,10 @@ LowerIndirectCallReturn(SDValue Chain, SDValue InFlag,
// Call has something to return
SDValue LoadRet;
SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Other, EVT::Flag);
SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
for(unsigned i=0;i<RetVals;i++) {
LoadRet = DAG.getNode(PIC16ISD::PIC16LdWF, dl, Tys, Chain, DataAddr_Lo,
DataAddr_Hi, DAG.getConstant(i, EVT::i8),
DataAddr_Hi, DAG.getConstant(i, MVT::i8),
InFlag);
InFlag = getOutFlag(LoadRet);
Chain = getChain(LoadRet);
@ -1226,13 +1226,13 @@ LowerDirectCallReturn(SDValue RetLabel, SDValue Chain, SDValue InFlag,
unsigned LdOffset;
LegalizeAddress(RetLabel, DAG, LdLo, LdHi, LdOffset, dl);
SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Other, EVT::Flag);
SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
SDValue LoadRet;
for(unsigned i=0, Offset=0;i<RetVals;i++) {
LoadRet = DAG.getNode(PIC16ISD::PIC16LdWF, dl, Tys, Chain, LdLo, LdHi,
DAG.getConstant(LdOffset + Offset, EVT::i8),
DAG.getConstant(LdOffset + Offset, MVT::i8),
InFlag);
InFlag = getOutFlag(LoadRet);
@ -1261,18 +1261,18 @@ PIC16TargetLowering::LowerReturn(SDValue Chain,
std::string FuncName = F->getName();
const char *tmpName = createESName(PAN::getFrameLabel(FuncName));
SDVTList VTs = DAG.getVTList (EVT::i8, EVT::Other);
SDValue ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
SDValue BS = DAG.getConstant(1, EVT::i8);
SDVTList VTs = DAG.getVTList (MVT::i8, MVT::Other);
SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
SDValue BS = DAG.getConstant(1, MVT::i8);
SDValue RetVal;
for(unsigned i=0;i<NumRet; ++i) {
RetVal = Outs[i].Val;
Chain = DAG.getNode (PIC16ISD::PIC16Store, dl, EVT::Other, Chain, RetVal,
Chain = DAG.getNode (PIC16ISD::PIC16Store, dl, MVT::Other, Chain, RetVal,
ES, BS,
DAG.getConstant (i, EVT::i8));
DAG.getConstant (i, MVT::i8));
}
return DAG.getNode(PIC16ISD::RET, dl, EVT::Other, Chain);
return DAG.getNode(PIC16ISD::RET, dl, MVT::Other, Chain);
}
void PIC16TargetLowering::
@ -1281,7 +1281,7 @@ GetDataAddress(DebugLoc dl, SDValue Callee, SDValue &Chain,
SelectionDAG &DAG) {
assert (Callee.getOpcode() == PIC16ISD::PIC16Connect
&& "Don't know what to do of such callee!!");
SDValue ZeroOperand = DAG.getConstant(0, EVT::i8);
SDValue ZeroOperand = DAG.getConstant(0, MVT::i8);
SDValue SeqStart = DAG.getCALLSEQ_START(Chain, ZeroOperand);
Chain = getChain(SeqStart);
SDValue OperFlag = getOutFlag(SeqStart); // To manage the data dependency
@ -1291,15 +1291,15 @@ GetDataAddress(DebugLoc dl, SDValue Callee, SDValue &Chain,
SDValue Hi = Callee.getOperand(1);
SDValue Data_Lo, Data_Hi;
SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Other, EVT::Flag);
SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Other, MVT::Flag);
// Subtract 2 from Address to get the Lower part of DataAddress.
SDVTList VTList = DAG.getVTList(EVT::i8, EVT::Flag);
SDVTList VTList = DAG.getVTList(MVT::i8, MVT::Flag);
Data_Lo = DAG.getNode(ISD::SUBC, dl, VTList, Lo,
DAG.getConstant(2, EVT::i8));
SDValue Ops[3] = { Hi, DAG.getConstant(0, EVT::i8), Data_Lo.getValue(1)};
DAG.getConstant(2, MVT::i8));
SDValue Ops[3] = { Hi, DAG.getConstant(0, MVT::i8), Data_Lo.getValue(1)};
Data_Hi = DAG.getNode(ISD::SUBE, dl, VTList, Ops, 3);
SDValue PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, EVT::i8, Data_Hi);
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, EVT::i8, Data_Lo, PCLATH);
SDValue PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, MVT::i8, Data_Hi);
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Data_Lo, PCLATH);
SDValue Call = DAG.getNode(PIC16ISD::CALLW, dl, Tys, Chain, Callee,
OperFlag);
Chain = getChain(Call);
@ -1310,7 +1310,7 @@ GetDataAddress(DebugLoc dl, SDValue Callee, SDValue &Chain,
OperFlag = getOutFlag(SeqEnd);
// Low part of Data Address
DataAddr_Lo = DAG.getNode(PIC16ISD::MTLO, dl, EVT::i8, Call, OperFlag);
DataAddr_Lo = DAG.getNode(PIC16ISD::MTLO, dl, MVT::i8, Call, OperFlag);
// Make the second call.
SeqStart = DAG.getCALLSEQ_START(Chain, ZeroOperand);
@ -1319,13 +1319,13 @@ GetDataAddress(DebugLoc dl, SDValue Callee, SDValue &Chain,
// Subtract 1 from Address to get high part of data address.
Data_Lo = DAG.getNode(ISD::SUBC, dl, VTList, Lo,
DAG.getConstant(1, EVT::i8));
SDValue HiOps[3] = { Hi, DAG.getConstant(0, EVT::i8), Data_Lo.getValue(1)};
DAG.getConstant(1, MVT::i8));
SDValue HiOps[3] = { Hi, DAG.getConstant(0, MVT::i8), Data_Lo.getValue(1)};
Data_Hi = DAG.getNode(ISD::SUBE, dl, VTList, HiOps, 3);
PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, EVT::i8, Data_Hi);
PCLATH = DAG.getNode(PIC16ISD::MTPCLATH, dl, MVT::i8, Data_Hi);
// Use new Lo to make another CALLW
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, EVT::i8, Data_Lo, PCLATH);
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Data_Lo, PCLATH);
Call = DAG.getNode(PIC16ISD::CALLW, dl, Tys, Chain, Callee, OperFlag);
Chain = getChain(Call);
OperFlag = getOutFlag(Call);
@ -1334,7 +1334,7 @@ GetDataAddress(DebugLoc dl, SDValue Callee, SDValue &Chain,
Chain = getChain(SeqEnd);
OperFlag = getOutFlag(SeqEnd);
// Hi part of Data Address
DataAddr_Hi = DAG.getNode(PIC16ISD::MTHI, dl, EVT::i8, Call, OperFlag);
DataAddr_Hi = DAG.getNode(PIC16ISD::MTHI, dl, MVT::i8, Call, OperFlag);
}
SDValue
@ -1346,7 +1346,7 @@ PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
DebugLoc dl, SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
assert(Callee.getValueType() == EVT::i16 &&
assert(Callee.getValueType() == MVT::i16 &&
"Don't know how to legalize this call node!!!");
// The flag to track if this is a direct or indirect call.
@ -1369,7 +1369,7 @@ PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Indirect addresses. Get the hi and lo parts of ptr.
GetExpandedParts(Callee, DAG, Lo, Hi);
// Connect Lo and Hi parts of the callee with the PIC16Connect
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, EVT::i8, Lo, Hi);
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, Lo, Hi);
// Read DataAddress only if we have to pass arguments or
// read return value.
@ -1377,7 +1377,7 @@ PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
GetDataAddress(dl, Callee, Chain, DataAddr_Lo, DataAddr_Hi, DAG);
}
SDValue ZeroOperand = DAG.getConstant(0, EVT::i8);
SDValue ZeroOperand = DAG.getConstant(0, MVT::i8);
// Start the call sequence.
// Carring the Constant 0 along the CALLSEQSTART
@ -1394,32 +1394,32 @@ PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Considering the GlobalAddressNode case here.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
GlobalValue *GV = G->getGlobal();
Callee = DAG.getTargetGlobalAddress(GV, EVT::i8);
Callee = DAG.getTargetGlobalAddress(GV, MVT::i8);
Name = G->getGlobal()->getName();
} else {// Considering the ExternalSymbol case here
ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Callee);
Callee = DAG.getTargetExternalSymbol(ES->getSymbol(), EVT::i8);
Callee = DAG.getTargetExternalSymbol(ES->getSymbol(), MVT::i8);
Name = ES->getSymbol();
}
// Label for argument passing
const char *argFrame = createESName(PAN::getArgsLabel(Name));
ArgLabel = DAG.getTargetExternalSymbol(argFrame, EVT::i8);
ArgLabel = DAG.getTargetExternalSymbol(argFrame, MVT::i8);
// Label for reading return value
const char *retName = createESName(PAN::getRetvalLabel(Name));
RetLabel = DAG.getTargetExternalSymbol(retName, EVT::i8);
RetLabel = DAG.getTargetExternalSymbol(retName, MVT::i8);
} else {
// if indirect call
SDValue CodeAddr_Lo = Callee.getOperand(0);
SDValue CodeAddr_Hi = Callee.getOperand(1);
/*CodeAddr_Lo = DAG.getNode(ISD::ADD, dl, EVT::i8, CodeAddr_Lo,
DAG.getConstant(2, EVT::i8));*/
/*CodeAddr_Lo = DAG.getNode(ISD::ADD, dl, MVT::i8, CodeAddr_Lo,
DAG.getConstant(2, MVT::i8));*/
// move Hi part in PCLATH
CodeAddr_Hi = DAG.getNode(PIC16ISD::MTPCLATH, dl, EVT::i8, CodeAddr_Hi);
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, EVT::i8, CodeAddr_Lo,
CodeAddr_Hi = DAG.getNode(PIC16ISD::MTPCLATH, dl, MVT::i8, CodeAddr_Hi);
Callee = DAG.getNode(PIC16ISD::PIC16Connect, dl, MVT::i8, CodeAddr_Lo,
CodeAddr_Hi);
}
@ -1437,7 +1437,7 @@ PIC16TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
OperFlag = getOutFlag(CallArgs);
}
SDVTList Tys = DAG.getVTList(EVT::Other, EVT::Flag);
SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Flag);
SDValue PICCall = DAG.getNode(PIC16ISD::CALL, dl, Tys, Chain, Callee,
OperFlag);
Chain = getChain(PICCall);
@ -1502,14 +1502,14 @@ SDValue PIC16TargetLowering::LowerBinOp(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
// We should have handled larger operands in type legalizer itself.
assert (Op.getValueType() == EVT::i8 && "illegal Op to lower");
assert (Op.getValueType() == MVT::i8 && "illegal Op to lower");
unsigned MemOp = 1;
if (NeedToConvertToMemOp(Op, MemOp)) {
// Put one value on stack.
SDValue NewVal = ConvertToMemOperand (Op.getOperand(MemOp), DAG, dl);
return DAG.getNode(Op.getOpcode(), dl, EVT::i8, Op.getOperand(MemOp ^ 1),
return DAG.getNode(Op.getOpcode(), dl, MVT::i8, Op.getOperand(MemOp ^ 1),
NewVal);
}
else {
@ -1521,7 +1521,7 @@ SDValue PIC16TargetLowering::LowerBinOp(SDValue Op, SelectionDAG &DAG) {
// that affects carry.
SDValue PIC16TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) {
// We should have handled larger operands in type legalizer itself.
assert (Op.getValueType() == EVT::i8 && "illegal add to lower");
assert (Op.getValueType() == MVT::i8 && "illegal add to lower");
DebugLoc dl = Op.getDebugLoc();
unsigned MemOp = 1;
if (NeedToConvertToMemOp(Op, MemOp)) {
@ -1529,7 +1529,7 @@ SDValue PIC16TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) {
SDValue NewVal = ConvertToMemOperand (Op.getOperand(MemOp), DAG, dl);
// ADDC and ADDE produce two results.
SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Flag);
SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Flag);
// ADDE has three operands, the last one is the carry bit.
if (Op.getOpcode() == ISD::ADDE)
@ -1541,7 +1541,7 @@ SDValue PIC16TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) {
NewVal);
// ADD it is. It produces only one result.
else
return DAG.getNode(Op.getOpcode(), dl, EVT::i8, Op.getOperand(MemOp ^ 1),
return DAG.getNode(Op.getOpcode(), dl, MVT::i8, Op.getOperand(MemOp ^ 1),
NewVal);
}
else
@ -1551,7 +1551,7 @@ SDValue PIC16TargetLowering::LowerADD(SDValue Op, SelectionDAG &DAG) {
SDValue PIC16TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
// We should have handled larger operands in type legalizer itself.
assert (Op.getValueType() == EVT::i8 && "illegal sub to lower");
assert (Op.getValueType() == MVT::i8 && "illegal sub to lower");
// Nothing to do if the first operand is already a direct load and it has
// only one use.
@ -1561,7 +1561,7 @@ SDValue PIC16TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) {
// Put first operand on stack.
SDValue NewVal = ConvertToMemOperand (Op.getOperand(0), DAG, dl);
SDVTList Tys = DAG.getVTList(EVT::i8, EVT::Flag);
SDVTList Tys = DAG.getVTList(MVT::i8, MVT::Flag);
switch (Op.getOpcode()) {
default:
assert (0 && "Opcode unknown.");
@ -1573,7 +1573,7 @@ SDValue PIC16TargetLowering::LowerSUB(SDValue Op, SelectionDAG &DAG) {
return DAG.getNode(Op.getOpcode(), dl, Tys, NewVal, Op.getOperand(1));
break;
case ISD::SUB:
return DAG.getNode(Op.getOpcode(), dl, EVT::i8, NewVal, Op.getOperand(1));
return DAG.getNode(Op.getOpcode(), dl, MVT::i8, NewVal, Op.getOperand(1));
break;
}
}
@ -1615,13 +1615,13 @@ PIC16TargetLowering::LowerFormalArguments(SDValue Chain,
// Create the <fname>.args external symbol.
const char *tmpName = createESName(PAN::getArgsLabel(FuncName));
SDValue ES = DAG.getTargetExternalSymbol(tmpName, EVT::i8);
SDValue ES = DAG.getTargetExternalSymbol(tmpName, MVT::i8);
// Load arg values from the label + offset.
SDVTList VTs = DAG.getVTList (EVT::i8, EVT::Other);
SDValue BS = DAG.getConstant(1, EVT::i8);
SDVTList VTs = DAG.getVTList (MVT::i8, MVT::Other);
SDValue BS = DAG.getConstant(1, MVT::i8);
for (unsigned i = 0; i < NumArgVals ; ++i) {
SDValue Offset = DAG.getConstant(i, EVT::i8);
SDValue Offset = DAG.getConstant(i, MVT::i8);
SDValue PICLoad = DAG.getNode(PIC16ISD::PIC16LdArg, dl, VTs, Chain, ES, BS,
Offset);
Chain = getChain(PICLoad);
@ -1753,16 +1753,16 @@ SDValue PIC16TargetLowering::getPIC16Cmp(SDValue LHS, SDValue RHS,
}
}
PIC16CC = DAG.getConstant(CondCode, EVT::i8);
PIC16CC = DAG.getConstant(CondCode, MVT::i8);
// These are signed comparisons.
SDValue Mask = DAG.getConstant(128, EVT::i8);
SDValue Mask = DAG.getConstant(128, MVT::i8);
if (isSignedComparison(CondCode)) {
LHS = DAG.getNode (ISD::XOR, dl, EVT::i8, LHS, Mask);
RHS = DAG.getNode (ISD::XOR, dl, EVT::i8, RHS, Mask);
LHS = DAG.getNode (ISD::XOR, dl, MVT::i8, LHS, Mask);
RHS = DAG.getNode (ISD::XOR, dl, MVT::i8, RHS, Mask);
}
SDVTList VTs = DAG.getVTList (EVT::i8, EVT::Flag);
SDVTList VTs = DAG.getVTList (MVT::i8, MVT::Flag);
// We can use a subtract operation to set the condition codes. But
// we need to put one operand in memory if required.
// Nothing to do if the first operand is already a valid type (direct load
@ -1877,7 +1877,7 @@ SDValue PIC16TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
SDValue PIC16CC;
SDValue Cmp = getPIC16Cmp(LHS, RHS, ORIGCC, PIC16CC, DAG, dl);
return DAG.getNode(PIC16ISD::BRCOND, dl, EVT::Other, Chain, Dest, PIC16CC,
return DAG.getNode(PIC16ISD::BRCOND, dl, MVT::Other, Chain, Dest, PIC16CC,
Cmp.getValue(1));
}
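
The PIC16 lowering hunks above are all instances of one mechanical rename: wherever a node result type, a constant's type, or a VT list names a plain machine type, the enumerator now comes from MVT rather than EVT, and the MVT converts implicitly wherever an EVT is expected. A minimal sketch of that spelling under the post-split SelectionDAG headers; the helper name and the constant value are illustrative and not part of the commit:

  #include "llvm/CodeGen/SelectionDAG.h"   // SelectionDAG, SDValue, SDVTList
  #include "llvm/CodeGen/ValueTypes.h"     // MVT (simple types), EVT (simple or extended)

  // Hypothetical helper mirroring the calls in the hunks above: simple value
  // types are spelled MVT::* and convert to EVT where a DAG API expects one.
  static SDValue buildByteWithChain(SelectionDAG &DAG, DebugLoc dl, SDValue Chain) {
    SDValue Zero = DAG.getConstant(0, MVT::i8);           // was EVT::i8 before the split
    SDVTList VTs = DAG.getVTList(MVT::i8, MVT::Other);    // value-plus-chain result list
    return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Zero, Chain);
  }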


@ -82,7 +82,7 @@ namespace llvm {
/// DAG node.
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the ISD::SETCC ValueType
virtual EVT::SimpleValueType getSetCCResultType(EVT ValType) const;
virtual MVT::SimpleValueType getSetCCResultType(EVT ValType) const;
SDValue LowerShift(SDValue Op, SelectionDAG &DAG);
SDValue LowerMUL(SDValue Op, SelectionDAG &DAG);
SDValue LowerADD(SDValue Op, SelectionDAG &DAG);


@ -64,13 +64,13 @@ namespace {
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
inline SDValue getI32Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i32);
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
/// getI64Imm - Return a target constant with the specified value, of type
/// i64.
inline SDValue getI64Imm(uint64_t Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i64);
return CurDAG->getTargetConstant(Imm, MVT::i64);
}
/// getSmallIPtrImm - Return a target constant of pointer type.
@ -286,7 +286,7 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
DebugLoc dl = DebugLoc::getUnknownLoc();
if (PPCLowering.getPointerTy() == EVT::i32) {
if (PPCLowering.getPointerTy() == MVT::i32) {
GlobalBaseReg = RegInfo->createVirtualRegister(PPC::GPRCRegisterClass);
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR), PPC::LR);
BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
@ -309,7 +309,7 @@ static bool isIntS16Immediate(SDNode *N, short &Imm) {
return false;
Imm = (short)cast<ConstantSDNode>(N)->getZExtValue();
if (N->getValueType(0) == EVT::i32)
if (N->getValueType(0) == MVT::i32)
return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
else
return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
@ -323,7 +323,7 @@ static bool isIntS16Immediate(SDValue Op, short &Imm) {
/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == EVT::i32) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
Imm = cast<ConstantSDNode>(N)->getZExtValue();
return true;
}
@ -333,7 +333,7 @@ static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
/// isInt64Immediate - This method tests to see if the node is a 64-bit constant
/// operand. If so Imm will receive the 64-bit value.
static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == EVT::i64) {
if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
Imm = cast<ConstantSDNode>(N)->getZExtValue();
return true;
}
@ -381,7 +381,7 @@ bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
unsigned &MB, unsigned &ME) {
// Don't even go down this path for i64, since different logic will be
// necessary for rldicl/rldicr/rldimi.
if (N->getValueType(0) != EVT::i32)
if (N->getValueType(0) != MVT::i32)
return false;
unsigned Shift = 32;
@ -485,7 +485,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
SH &= 31;
SDValue Ops[] = { Tmp3, Op1, getI32Imm(SH), getI32Imm(MB),
getI32Imm(ME) };
return CurDAG->getTargetNode(PPC::RLWIMI, dl, EVT::i32, Ops, 5);
return CurDAG->getTargetNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5);
}
}
return 0;
@ -498,17 +498,17 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
// Always select the LHS.
unsigned Opc;
if (LHS.getValueType() == EVT::i32) {
if (LHS.getValueType() == MVT::i32) {
unsigned Imm;
if (CC == ISD::SETEQ || CC == ISD::SETNE) {
if (isInt32Immediate(RHS, Imm)) {
// SETEQ/SETNE comparison with 16-bit immediate, fold it.
if (isUInt16(Imm))
return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, EVT::i32, LHS,
return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, MVT::i32, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
// If this is a 16-bit signed immediate, fold it.
if (isInt16((int)Imm))
return SDValue(CurDAG->getTargetNode(PPC::CMPWI, dl, EVT::i32, LHS,
return SDValue(CurDAG->getTargetNode(PPC::CMPWI, dl, MVT::i32, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
// For non-equality comparisons, the default code would materialize the
@ -520,36 +520,36 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
// xoris r0,r3,0x1234
// cmplwi cr0,r0,0x5678
// beq cr0,L6
SDValue Xor(CurDAG->getTargetNode(PPC::XORIS, dl, EVT::i32, LHS,
SDValue Xor(CurDAG->getTargetNode(PPC::XORIS, dl, MVT::i32, LHS,
getI32Imm(Imm >> 16)), 0);
return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, EVT::i32, Xor,
return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, MVT::i32, Xor,
getI32Imm(Imm & 0xFFFF)), 0);
}
Opc = PPC::CMPLW;
} else if (ISD::isUnsignedIntSetCC(CC)) {
if (isInt32Immediate(RHS, Imm) && isUInt16(Imm))
return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, EVT::i32, LHS,
return SDValue(CurDAG->getTargetNode(PPC::CMPLWI, dl, MVT::i32, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
Opc = PPC::CMPLW;
} else {
short SImm;
if (isIntS16Immediate(RHS, SImm))
return SDValue(CurDAG->getTargetNode(PPC::CMPWI, dl, EVT::i32, LHS,
return SDValue(CurDAG->getTargetNode(PPC::CMPWI, dl, MVT::i32, LHS,
getI32Imm((int)SImm & 0xFFFF)),
0);
Opc = PPC::CMPW;
}
} else if (LHS.getValueType() == EVT::i64) {
} else if (LHS.getValueType() == MVT::i64) {
uint64_t Imm;
if (CC == ISD::SETEQ || CC == ISD::SETNE) {
if (isInt64Immediate(RHS.getNode(), Imm)) {
// SETEQ/SETNE comparison with 16-bit immediate, fold it.
if (isUInt16(Imm))
return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, EVT::i64, LHS,
return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, MVT::i64, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
// If this is a 16-bit signed immediate, fold it.
if (isInt16(Imm))
return SDValue(CurDAG->getTargetNode(PPC::CMPDI, dl, EVT::i64, LHS,
return SDValue(CurDAG->getTargetNode(PPC::CMPDI, dl, MVT::i64, LHS,
getI32Imm(Imm & 0xFFFF)), 0);
// For non-equality comparisons, the default code would materialize the
@ -562,33 +562,33 @@ SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS,
// cmpldi cr0,r0,0x5678
// beq cr0,L6
if (isUInt32(Imm)) {
SDValue Xor(CurDAG->getTargetNode(PPC::XORIS8, dl, EVT::i64, LHS,
SDValue Xor(CurDAG->getTargetNode(PPC::XORIS8, dl, MVT::i64, LHS,
getI64Imm(Imm >> 16)), 0);
return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, EVT::i64, Xor,
return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, MVT::i64, Xor,
getI64Imm(Imm & 0xFFFF)), 0);
}
}
Opc = PPC::CMPLD;
} else if (ISD::isUnsignedIntSetCC(CC)) {
if (isInt64Immediate(RHS.getNode(), Imm) && isUInt16(Imm))
return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, EVT::i64, LHS,
return SDValue(CurDAG->getTargetNode(PPC::CMPLDI, dl, MVT::i64, LHS,
getI64Imm(Imm & 0xFFFF)), 0);
Opc = PPC::CMPLD;
} else {
short SImm;
if (isIntS16Immediate(RHS, SImm))
return SDValue(CurDAG->getTargetNode(PPC::CMPDI, dl, EVT::i64, LHS,
return SDValue(CurDAG->getTargetNode(PPC::CMPDI, dl, MVT::i64, LHS,
getI64Imm(SImm & 0xFFFF)),
0);
Opc = PPC::CMPD;
}
} else if (LHS.getValueType() == EVT::f32) {
} else if (LHS.getValueType() == MVT::f32) {
Opc = PPC::FCMPUS;
} else {
assert(LHS.getValueType() == EVT::f64 && "Unknown vt!");
assert(LHS.getValueType() == MVT::f64 && "Unknown vt!");
Opc = PPC::FCMPUD;
}
return SDValue(CurDAG->getTargetNode(Opc, dl, EVT::i32, LHS, RHS), 0);
return SDValue(CurDAG->getTargetNode(Opc, dl, MVT::i32, LHS, RHS), 0);
}
static PPC::Predicate getPredicateForSetCC(ISD::CondCode CC) {
@ -670,27 +670,27 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDValue Op) {
switch (CC) {
default: break;
case ISD::SETEQ: {
Op = SDValue(CurDAG->getTargetNode(PPC::CNTLZW, dl, EVT::i32, Op), 0);
Op = SDValue(CurDAG->getTargetNode(PPC::CNTLZW, dl, MVT::i32, Op), 0);
SDValue Ops[] = { Op, getI32Imm(27), getI32Imm(5), getI32Imm(31) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
case ISD::SETNE: {
SDValue AD =
SDValue(CurDAG->getTargetNode(PPC::ADDIC, dl, EVT::i32, EVT::Flag,
SDValue(CurDAG->getTargetNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
Op, getI32Imm(~0U)), 0);
return CurDAG->SelectNodeTo(N, PPC::SUBFE, EVT::i32, AD, Op,
return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op,
AD.getValue(1));
}
case ISD::SETLT: {
SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
case ISD::SETGT: {
SDValue T =
SDValue(CurDAG->getTargetNode(PPC::NEG, dl, EVT::i32, Op), 0);
T = SDValue(CurDAG->getTargetNode(PPC::ANDC, dl, EVT::i32, T, Op), 0);
SDValue(CurDAG->getTargetNode(PPC::NEG, dl, MVT::i32, Op), 0);
T = SDValue(CurDAG->getTargetNode(PPC::ANDC, dl, MVT::i32, T, Op), 0);
SDValue Ops[] = { T, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
}
} else if (Imm == ~0U) { // setcc op, -1
@ -698,33 +698,33 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDValue Op) {
switch (CC) {
default: break;
case ISD::SETEQ:
Op = SDValue(CurDAG->getTargetNode(PPC::ADDIC, dl, EVT::i32, EVT::Flag,
Op = SDValue(CurDAG->getTargetNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
Op, getI32Imm(1)), 0);
return CurDAG->SelectNodeTo(N, PPC::ADDZE, EVT::i32,
return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
SDValue(CurDAG->getTargetNode(PPC::LI, dl,
EVT::i32,
MVT::i32,
getI32Imm(0)), 0),
Op.getValue(1));
case ISD::SETNE: {
Op = SDValue(CurDAG->getTargetNode(PPC::NOR, dl, EVT::i32, Op, Op), 0);
SDNode *AD = CurDAG->getTargetNode(PPC::ADDIC, dl, EVT::i32, EVT::Flag,
Op = SDValue(CurDAG->getTargetNode(PPC::NOR, dl, MVT::i32, Op, Op), 0);
SDNode *AD = CurDAG->getTargetNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
Op, getI32Imm(~0U));
return CurDAG->SelectNodeTo(N, PPC::SUBFE, EVT::i32, SDValue(AD, 0),
return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0),
Op, SDValue(AD, 1));
}
case ISD::SETLT: {
SDValue AD = SDValue(CurDAG->getTargetNode(PPC::ADDI, dl, EVT::i32, Op,
SDValue AD = SDValue(CurDAG->getTargetNode(PPC::ADDI, dl, MVT::i32, Op,
getI32Imm(1)), 0);
SDValue AN = SDValue(CurDAG->getTargetNode(PPC::AND, dl, EVT::i32, AD,
SDValue AN = SDValue(CurDAG->getTargetNode(PPC::AND, dl, MVT::i32, AD,
Op), 0);
SDValue Ops[] = { AN, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
case ISD::SETGT: {
SDValue Ops[] = { Op, getI32Imm(1), getI32Imm(31), getI32Imm(31) };
Op = SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, EVT::i32, Ops, 4),
Op = SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, MVT::i32, Ops, 4),
0);
return CurDAG->SelectNodeTo(N, PPC::XORI, EVT::i32, Op,
return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op,
getI32Imm(1));
}
}
@ -738,29 +738,29 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDValue Op) {
SDValue IntCR;
// Force the ccreg into CR7.
SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, EVT::i32);
SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32);
SDValue InFlag(0, 0); // Null incoming flag value.
CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg,
InFlag).getValue(1);
if (PPCSubTarget.isGigaProcessor() && OtherCondIdx == -1)
IntCR = SDValue(CurDAG->getTargetNode(PPC::MFOCRF, dl, EVT::i32, CR7Reg,
IntCR = SDValue(CurDAG->getTargetNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg,
CCReg), 0);
else
IntCR = SDValue(CurDAG->getTargetNode(PPC::MFCR, dl, EVT::i32, CCReg), 0);
IntCR = SDValue(CurDAG->getTargetNode(PPC::MFCR, dl, MVT::i32, CCReg), 0);
SDValue Ops[] = { IntCR, getI32Imm((32-(3-Idx)) & 31),
getI32Imm(31), getI32Imm(31) };
if (OtherCondIdx == -1 && !Inv)
return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
// Get the specified bit.
SDValue Tmp =
SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, EVT::i32, Ops, 4), 0);
SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), 0);
if (Inv) {
assert(OtherCondIdx == -1 && "Can't have split plus negation");
return CurDAG->SelectNodeTo(N, PPC::XORI, EVT::i32, Tmp, getI32Imm(1));
return CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1));
}
// Otherwise, we have to turn an operation like SETONE -> SETOLT | SETOGT.
@ -769,9 +769,9 @@ SDNode *PPCDAGToDAGISel::SelectSETCC(SDValue Op) {
// Get the other bit of the comparison.
Ops[1] = getI32Imm((32-(3-OtherCondIdx)) & 31);
SDValue OtherCond =
SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, EVT::i32, Ops, 4), 0);
SDValue(CurDAG->getTargetNode(PPC::RLWINM, dl, MVT::i32, Ops, 4), 0);
return CurDAG->SelectNodeTo(N, PPC::OR, EVT::i32, Tmp, OtherCond);
return CurDAG->SelectNodeTo(N, PPC::OR, MVT::i32, Tmp, OtherCond);
}
@ -787,7 +787,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
default: break;
case ISD::Constant: {
if (N->getValueType(0) == EVT::i64) {
if (N->getValueType(0) == MVT::i64) {
// Get 64 bit value.
int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
// Assume no remaining bits.
@ -822,17 +822,17 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
// Simple value.
if (isInt16(Imm)) {
// Just the Lo bits.
Result = CurDAG->getTargetNode(PPC::LI8, dl, EVT::i64, getI32Imm(Lo));
Result = CurDAG->getTargetNode(PPC::LI8, dl, MVT::i64, getI32Imm(Lo));
} else if (Lo) {
// Handle the Hi bits.
unsigned OpC = Hi ? PPC::LIS8 : PPC::LI8;
Result = CurDAG->getTargetNode(OpC, dl, EVT::i64, getI32Imm(Hi));
Result = CurDAG->getTargetNode(OpC, dl, MVT::i64, getI32Imm(Hi));
// And Lo bits.
Result = CurDAG->getTargetNode(PPC::ORI8, dl, EVT::i64,
Result = CurDAG->getTargetNode(PPC::ORI8, dl, MVT::i64,
SDValue(Result, 0), getI32Imm(Lo));
} else {
// Just the Hi bits.
Result = CurDAG->getTargetNode(PPC::LIS8, dl, EVT::i64, getI32Imm(Hi));
Result = CurDAG->getTargetNode(PPC::LIS8, dl, MVT::i64, getI32Imm(Hi));
}
// If no shift, we're done.
@ -840,18 +840,18 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
// Shift for next step if the upper 32-bits were not zero.
if (Imm) {
Result = CurDAG->getTargetNode(PPC::RLDICR, dl, EVT::i64,
Result = CurDAG->getTargetNode(PPC::RLDICR, dl, MVT::i64,
SDValue(Result, 0),
getI32Imm(Shift), getI32Imm(63 - Shift));
}
// Add in the last bits as required.
if ((Hi = (Remainder >> 16) & 0xFFFF)) {
Result = CurDAG->getTargetNode(PPC::ORIS8, dl, EVT::i64,
Result = CurDAG->getTargetNode(PPC::ORIS8, dl, MVT::i64,
SDValue(Result, 0), getI32Imm(Hi));
}
if ((Lo = Remainder & 0xFFFF)) {
Result = CurDAG->getTargetNode(PPC::ORI8, dl, EVT::i64,
Result = CurDAG->getTargetNode(PPC::ORI8, dl, MVT::i64,
SDValue(Result, 0), getI32Imm(Lo));
}
@ -868,7 +868,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
case ISD::FrameIndex: {
int FI = cast<FrameIndexSDNode>(N)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, Op.getValueType());
unsigned Opc = Op.getValueType() == EVT::i32 ? PPC::ADDI : PPC::ADDI8;
unsigned Opc = Op.getValueType() == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
if (N->hasOneUse())
return CurDAG->SelectNodeTo(N, Opc, Op.getValueType(), TFI,
getSmallIPtrImm(0));
@ -880,10 +880,10 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
SDValue InFlag = N->getOperand(1);
// Use MFOCRF if supported.
if (PPCSubTarget.isGigaProcessor())
return CurDAG->getTargetNode(PPC::MFOCRF, dl, EVT::i32,
return CurDAG->getTargetNode(PPC::MFOCRF, dl, MVT::i32,
N->getOperand(0), InFlag);
else
return CurDAG->getTargetNode(PPC::MFCR, dl, EVT::i32, InFlag);
return CurDAG->getTargetNode(PPC::MFCR, dl, MVT::i32, InFlag);
}
case ISD::SDIV: {
@ -897,19 +897,19 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
SDValue N0 = N->getOperand(0);
if ((signed)Imm > 0 && isPowerOf2_32(Imm)) {
SDNode *Op =
CurDAG->getTargetNode(PPC::SRAWI, dl, EVT::i32, EVT::Flag,
CurDAG->getTargetNode(PPC::SRAWI, dl, MVT::i32, MVT::Flag,
N0, getI32Imm(Log2_32(Imm)));
return CurDAG->SelectNodeTo(N, PPC::ADDZE, EVT::i32,
return CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32,
SDValue(Op, 0), SDValue(Op, 1));
} else if ((signed)Imm < 0 && isPowerOf2_32(-Imm)) {
SDNode *Op =
CurDAG->getTargetNode(PPC::SRAWI, dl, EVT::i32, EVT::Flag,
CurDAG->getTargetNode(PPC::SRAWI, dl, MVT::i32, MVT::Flag,
N0, getI32Imm(Log2_32(-Imm)));
SDValue PT =
SDValue(CurDAG->getTargetNode(PPC::ADDZE, dl, EVT::i32,
SDValue(CurDAG->getTargetNode(PPC::ADDZE, dl, MVT::i32,
SDValue(Op, 0), SDValue(Op, 1)),
0);
return CurDAG->SelectNodeTo(N, PPC::NEG, EVT::i32, PT);
return CurDAG->SelectNodeTo(N, PPC::NEG, MVT::i32, PT);
}
}
@ -932,28 +932,28 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
unsigned Opcode;
bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD;
if (LD->getValueType(0) != EVT::i64) {
if (LD->getValueType(0) != MVT::i64) {
// Handle PPC32 integer and normal FP loads.
assert((!isSExt || LoadedVT == EVT::i16) && "Invalid sext update load");
switch (LoadedVT.getSimpleVT()) {
assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
switch (LoadedVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Invalid PPC load type!");
case EVT::f64: Opcode = PPC::LFDU; break;
case EVT::f32: Opcode = PPC::LFSU; break;
case EVT::i32: Opcode = PPC::LWZU; break;
case EVT::i16: Opcode = isSExt ? PPC::LHAU : PPC::LHZU; break;
case EVT::i1:
case EVT::i8: Opcode = PPC::LBZU; break;
case MVT::f64: Opcode = PPC::LFDU; break;
case MVT::f32: Opcode = PPC::LFSU; break;
case MVT::i32: Opcode = PPC::LWZU; break;
case MVT::i16: Opcode = isSExt ? PPC::LHAU : PPC::LHZU; break;
case MVT::i1:
case MVT::i8: Opcode = PPC::LBZU; break;
}
} else {
assert(LD->getValueType(0) == EVT::i64 && "Unknown load result type!");
assert((!isSExt || LoadedVT == EVT::i16) && "Invalid sext update load");
switch (LoadedVT.getSimpleVT()) {
assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!");
assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load");
switch (LoadedVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Invalid PPC load type!");
case EVT::i64: Opcode = PPC::LDU; break;
case EVT::i32: Opcode = PPC::LWZU8; break;
case EVT::i16: Opcode = isSExt ? PPC::LHAU8 : PPC::LHZU8; break;
case EVT::i1:
case EVT::i8: Opcode = PPC::LBZU8; break;
case MVT::i64: Opcode = PPC::LDU; break;
case MVT::i32: Opcode = PPC::LWZU8; break;
case MVT::i16: Opcode = isSExt ? PPC::LHAU8 : PPC::LHZU8; break;
case MVT::i1:
case MVT::i8: Opcode = PPC::LBZU8; break;
}
}
@ -963,7 +963,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
// FIXME: PPC64
return CurDAG->getTargetNode(Opcode, dl, LD->getValueType(0),
PPCLowering.getPointerTy(),
EVT::Other, Ops, 3);
MVT::Other, Ops, 3);
} else {
llvm_unreachable("R+R preindex loads not supported yet!");
}
@ -978,7 +978,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
isRotateAndMask(N->getOperand(0).getNode(), Imm, false, SH, MB, ME)) {
SDValue Val = N->getOperand(0).getOperand(0);
SDValue Ops[] = { Val, getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
// If this is just a masked value where the input is not handled above, and
// is not a rotate-left (handled by a pattern in the .td file), emit rlwinm
@ -987,7 +987,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
N->getOperand(0).getOpcode() != ISD::ROTL) {
SDValue Val = N->getOperand(0);
SDValue Ops[] = { Val, getI32Imm(0), getI32Imm(MB), getI32Imm(ME) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
// AND X, 0 -> 0, not "rlwinm 32".
if (isInt32Immediate(N->getOperand(1), Imm) && (Imm == 0)) {
@ -1005,7 +1005,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
N->getOperand(0).getOperand(1),
getI32Imm(0), getI32Imm(MB),getI32Imm(ME) };
return CurDAG->getTargetNode(PPC::RLWIMI, dl, EVT::i32, Ops, 5);
return CurDAG->getTargetNode(PPC::RLWIMI, dl, MVT::i32, Ops, 5);
}
}
@ -1013,7 +1013,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
break;
}
case ISD::OR:
if (N->getValueType(0) == EVT::i32)
if (N->getValueType(0) == MVT::i32)
if (SDNode *I = SelectBitfieldInsert(N))
return I;
@ -1025,7 +1025,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
isRotateAndMask(N, Imm, true, SH, MB, ME)) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
// Other cases are autogenerated.
@ -1037,7 +1037,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
isRotateAndMask(N, Imm, true, SH, MB, ME)) {
SDValue Ops[] = { N->getOperand(0).getOperand(0),
getI32Imm(SH), getI32Imm(MB), getI32Imm(ME) };
return CurDAG->SelectNodeTo(N, PPC::RLWINM, EVT::i32, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops, 4);
}
// Other cases are autogenerated.
@ -1053,11 +1053,11 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
if (N1C->isNullValue() && N3C->isNullValue() &&
N2C->getZExtValue() == 1ULL && CC == ISD::SETNE &&
// FIXME: Implement this optzn for PPC64.
N->getValueType(0) == EVT::i32) {
N->getValueType(0) == MVT::i32) {
SDNode *Tmp =
CurDAG->getTargetNode(PPC::ADDIC, dl, EVT::i32, EVT::Flag,
CurDAG->getTargetNode(PPC::ADDIC, dl, MVT::i32, MVT::Flag,
N->getOperand(0), getI32Imm(~0U));
return CurDAG->SelectNodeTo(N, PPC::SUBFE, EVT::i32,
return CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32,
SDValue(Tmp, 0), N->getOperand(0),
SDValue(Tmp, 1));
}
@ -1066,13 +1066,13 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
unsigned BROpc = getPredicateForSetCC(CC);
unsigned SelectCCOp;
if (N->getValueType(0) == EVT::i32)
if (N->getValueType(0) == MVT::i32)
SelectCCOp = PPC::SELECT_CC_I4;
else if (N->getValueType(0) == EVT::i64)
else if (N->getValueType(0) == MVT::i64)
SelectCCOp = PPC::SELECT_CC_I8;
else if (N->getValueType(0) == EVT::f32)
else if (N->getValueType(0) == MVT::f32)
SelectCCOp = PPC::SELECT_CC_F4;
else if (N->getValueType(0) == EVT::f64)
else if (N->getValueType(0) == MVT::f64)
SelectCCOp = PPC::SELECT_CC_F8;
else
SelectCCOp = PPC::SELECT_CC_VRRC;
@ -1092,23 +1092,23 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
getI32Imm(cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
SDValue Ops[] = { Pred, N->getOperand(2), N->getOperand(3),
N->getOperand(0), N->getOperand(4) };
return CurDAG->SelectNodeTo(N, PPC::BCC, EVT::Other, Ops, 5);
return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 5);
}
case ISD::BR_CC: {
ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl);
SDValue Ops[] = { getI32Imm(getPredicateForSetCC(CC)), CondCode,
N->getOperand(4), N->getOperand(0) };
return CurDAG->SelectNodeTo(N, PPC::BCC, EVT::Other, Ops, 4);
return CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops, 4);
}
case ISD::BRIND: {
// FIXME: Should custom lower this.
SDValue Chain = N->getOperand(0);
SDValue Target = N->getOperand(1);
unsigned Opc = Target.getValueType() == EVT::i32 ? PPC::MTCTR : PPC::MTCTR8;
Chain = SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Other, Target,
unsigned Opc = Target.getValueType() == MVT::i32 ? PPC::MTCTR : PPC::MTCTR8;
Chain = SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Other, Target,
Chain), 0);
return CurDAG->SelectNodeTo(N, PPC::BCTR, EVT::Other, Chain);
return CurDAG->SelectNodeTo(N, PPC::BCTR, MVT::Other, Chain);
}
case ISD::DECLARE: {
SDValue Chain = N->getOperand(0);
@ -1149,7 +1149,7 @@ SDNode *PPCDAGToDAGISel::Select(SDValue Op) {
SDValue Tmp1 = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
SDValue Tmp2 = CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());
return CurDAG->SelectNodeTo(N, TargetInstrInfo::DECLARE,
EVT::Other, Tmp1, Tmp2, Chain);
MVT::Other, Tmp1, Tmp2, Chain);
}
}
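
One of the PPC hunks above changes more than a spelling: getSimpleVT() now returns an MVT object that wraps the enumerator instead of the bare enum, so code that switches over it reads the SimpleTy member. A short sketch of that idiom under the post-split ValueTypes header; the returned opcode numbers are stand-ins, only the switch shape comes from the hunk:

  #include "llvm/CodeGen/ValueTypes.h"   // EVT, MVT, MVT::SimpleValueType

  // Hypothetical opcode pick for a loaded type: the switch key is now
  // LoadedVT.getSimpleVT().SimpleTy, an MVT::SimpleValueType enumerator.
  static unsigned pickLoadOpcode(EVT LoadedVT, bool isSExt) {
    switch (LoadedVT.getSimpleVT().SimpleTy) {
    default:       return 0;              // caller treats 0 as "unsupported"
    case MVT::i32: return 1;              // placeholder opcode numbers
    case MVT::i16: return isSExt ? 2 : 3;
    case MVT::i1:
    case MVT::i8:  return 4;
    }
  }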

File diff suppressed because it is too large

@ -230,7 +230,7 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the ISD::SETCC ValueType
virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
/// getPreIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if the node's address


@ -191,7 +191,7 @@ def ME : SDNodeXForm<imm, [{
def maskimm32 : PatLeaf<(imm), [{
// maskImm predicate - True if immediate is a run of ones.
unsigned mb, me;
if (N->getValueType(0) == EVT::i32)
if (N->getValueType(0) == MVT::i32)
return isRunOfOnes((unsigned)N->getZExtValue(), mb, me);
else
return false;
@ -200,7 +200,7 @@ def maskimm32 : PatLeaf<(imm), [{
def immSExt16 : PatLeaf<(imm), [{
// immSExt16 predicate - True if the immediate fits in a 16-bit sign extended
// field. Used by instructions like 'addi'.
if (N->getValueType(0) == EVT::i32)
if (N->getValueType(0) == MVT::i32)
return (int32_t)N->getZExtValue() == (short)N->getZExtValue();
else
return (int64_t)N->getZExtValue() == (short)N->getZExtValue();
@ -227,7 +227,7 @@ def imm16ShiftedSExt : PatLeaf<(imm), [{
// immediate are set. Used by instructions like 'addis'. Identical to
// imm16ShiftedZExt in 32-bit mode.
if (N->getZExtValue() & 0xFFFF) return false;
if (N->getValueType(0) == EVT::i32)
if (N->getValueType(0) == MVT::i32)
return true;
// For 64-bit, make sure it is sext right.
return N->getZExtValue() == (uint64_t)(int)N->getZExtValue();


@ -79,8 +79,8 @@ void SparcDAGToDAGISel::InstructionSelect() {
bool SparcDAGToDAGISel::SelectADDRri(SDValue Op, SDValue Addr,
SDValue &Base, SDValue &Offset) {
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
Offset = CurDAG->getTargetConstant(0, EVT::i32);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
@ -93,11 +93,11 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Op, SDValue Addr,
if (FrameIndexSDNode *FIN =
dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
// Constant offset from frame ref.
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
} else {
Base = Addr.getOperand(0);
}
Offset = CurDAG->getTargetConstant(CN->getZExtValue(), EVT::i32);
Offset = CurDAG->getTargetConstant(CN->getZExtValue(), MVT::i32);
return true;
}
}
@ -113,7 +113,7 @@ bool SparcDAGToDAGISel::SelectADDRri(SDValue Op, SDValue Addr,
}
}
Base = Addr;
Offset = CurDAG->getTargetConstant(0, EVT::i32);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
@ -137,7 +137,7 @@ bool SparcDAGToDAGISel::SelectADDRrr(SDValue Op, SDValue Addr,
}
R1 = Addr;
R2 = CurDAG->getRegister(SP::G0, EVT::i32);
R2 = CurDAG->getRegister(SP::G0, MVT::i32);
return true;
}
@ -158,17 +158,17 @@ SDNode *SparcDAGToDAGISel::Select(SDValue Op) {
// Set the Y register to the high-part.
SDValue TopPart;
if (N->getOpcode() == ISD::SDIV) {
TopPart = SDValue(CurDAG->getTargetNode(SP::SRAri, dl, EVT::i32, DivLHS,
CurDAG->getTargetConstant(31, EVT::i32)), 0);
TopPart = SDValue(CurDAG->getTargetNode(SP::SRAri, dl, MVT::i32, DivLHS,
CurDAG->getTargetConstant(31, MVT::i32)), 0);
} else {
TopPart = CurDAG->getRegister(SP::G0, EVT::i32);
TopPart = CurDAG->getRegister(SP::G0, MVT::i32);
}
TopPart = SDValue(CurDAG->getTargetNode(SP::WRYrr, dl, EVT::Flag, TopPart,
CurDAG->getRegister(SP::G0, EVT::i32)), 0);
TopPart = SDValue(CurDAG->getTargetNode(SP::WRYrr, dl, MVT::Flag, TopPart,
CurDAG->getRegister(SP::G0, MVT::i32)), 0);
// FIXME: Handle div by immediate.
unsigned Opcode = N->getOpcode() == ISD::SDIV ? SP::SDIVrr : SP::UDIVrr;
return CurDAG->SelectNodeTo(N, Opcode, EVT::i32, DivLHS, DivRHS,
return CurDAG->SelectNodeTo(N, Opcode, MVT::i32, DivLHS, DivRHS,
TopPart);
}
case ISD::MULHU:
@ -177,10 +177,10 @@ SDNode *SparcDAGToDAGISel::Select(SDValue Op) {
SDValue MulLHS = N->getOperand(0);
SDValue MulRHS = N->getOperand(1);
unsigned Opcode = N->getOpcode() == ISD::MULHU ? SP::UMULrr : SP::SMULrr;
SDNode *Mul = CurDAG->getTargetNode(Opcode, dl, EVT::i32, EVT::Flag,
SDNode *Mul = CurDAG->getTargetNode(Opcode, dl, MVT::i32, MVT::Flag,
MulLHS, MulRHS);
// The high part is in the Y register.
return CurDAG->SelectNodeTo(N, SP::RDY, EVT::i32, SDValue(Mul, 1));
return CurDAG->SelectNodeTo(N, SP::RDY, MVT::i32, SDValue(Mul, 1));
return NULL;
}
}

View File

@ -72,8 +72,8 @@ SparcTargetLowering::LowerReturn(SDValue Chain,
}
if (Flag.getNode())
return DAG.getNode(SPISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
return DAG.getNode(SPISD::RET_FLAG, dl, EVT::Other, Chain);
return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
return DAG.getNode(SPISD::RET_FLAG, dl, MVT::Other, Chain);
}
/// LowerFormalArguments - V8 uses a very simple ABI, where all values are
@ -108,40 +108,40 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
// FIXME: We ignore the register assignments of AnalyzeFormalArguments
// because it doesn't know how to split a double into two i32 registers.
EVT ObjectVT = VA.getValVT();
switch (ObjectVT.getSimpleVT()) {
switch (ObjectVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unhandled argument type!");
case EVT::i1:
case EVT::i8:
case EVT::i16:
case EVT::i32:
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32:
if (!Ins[i].Used) { // Argument is dead.
if (CurArgReg < ArgRegEnd) ++CurArgReg;
InVals.push_back(DAG.getUNDEF(ObjectVT));
} else if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i32);
if (ObjectVT != EVT::i32) {
SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
if (ObjectVT != MVT::i32) {
unsigned AssertOp = ISD::AssertSext;
Arg = DAG.getNode(AssertOp, dl, EVT::i32, Arg,
Arg = DAG.getNode(AssertOp, dl, MVT::i32, Arg,
DAG.getValueType(ObjectVT));
Arg = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Arg);
}
InVals.push_back(Arg);
} else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
SDValue Load;
if (ObjectVT == EVT::i32) {
Load = DAG.getLoad(EVT::i32, dl, Chain, FIPtr, NULL, 0);
if (ObjectVT == MVT::i32) {
Load = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
} else {
ISD::LoadExtType LoadOp = ISD::SEXTLOAD;
// Sparc is big endian, so add an offset based on the ObjectVT.
unsigned Offset = 4-std::max(1U, ObjectVT.getSizeInBits()/8);
FIPtr = DAG.getNode(ISD::ADD, dl, EVT::i32, FIPtr,
DAG.getConstant(Offset, EVT::i32));
Load = DAG.getExtLoad(LoadOp, dl, EVT::i32, Chain, FIPtr,
FIPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIPtr,
DAG.getConstant(Offset, MVT::i32));
Load = DAG.getExtLoad(LoadOp, dl, MVT::i32, Chain, FIPtr,
NULL, 0, ObjectVT);
Load = DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, Load);
}
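The hunk above shows the mechanical pattern repeated throughout this commit: getSimpleVT() now returns an MVT value rather than a bare enum, so switches dispatch on its SimpleTy member, and the enumerators themselves move from EVT:: to MVT::. A minimal sketch of the new idiom, modeled on the Sparc argument-size logic; the classifyArgSize helper is hypothetical:

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

// Hypothetical helper: bytes an argument of type VT occupies in this ABI.
static unsigned classifyArgSize(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {  // MVT is a struct; switch on its enum
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::f32: return 4;
  case MVT::i64:
  case MVT::f64: return 8;
  default:       return 0;              // extended or unhandled types
  }
}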
@ -150,7 +150,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
ArgOffset += 4;
break;
case EVT::f32:
case MVT::f32:
if (!Ins[i].Used) { // Argument is dead.
if (CurArgReg < ArgRegEnd) ++CurArgReg;
InVals.push_back(DAG.getUNDEF(ObjectVT));
@ -158,21 +158,21 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
// FP value is passed in an integer register.
unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(*CurArgReg++, VReg);
SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i32);
SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, Arg);
Arg = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Arg);
InVals.push_back(Arg);
} else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
SDValue Load = DAG.getLoad(EVT::f32, dl, Chain, FIPtr, NULL, 0);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
SDValue Load = DAG.getLoad(MVT::f32, dl, Chain, FIPtr, NULL, 0);
InVals.push_back(Load);
}
ArgOffset += 4;
break;
case EVT::i64:
case EVT::f64:
case MVT::i64:
case MVT::f64:
if (!Ins[i].Used) { // Argument is dead.
if (CurArgReg < ArgRegEnd) ++CurArgReg;
if (CurArgReg < ArgRegEnd) ++CurArgReg;
@ -182,31 +182,31 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(*CurArgReg++, VRegHi);
HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, EVT::i32);
HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
} else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
HiVal = DAG.getLoad(EVT::i32, dl, Chain, FIPtr, NULL, 0);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
HiVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
}
SDValue LoVal;
if (CurArgReg < ArgRegEnd) { // Lives in an incoming GPR
unsigned VRegLo = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(*CurArgReg++, VRegLo);
LoVal = DAG.getCopyFromReg(Chain, dl, VRegLo, EVT::i32);
LoVal = DAG.getCopyFromReg(Chain, dl, VRegLo, MVT::i32);
} else {
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset+4);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
LoVal = DAG.getLoad(EVT::i32, dl, Chain, FIPtr, NULL, 0);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, NULL, 0);
}
// Compose the two halves together into an i64 unit.
SDValue WholeValue =
DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, LoVal, HiVal);
DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
// If we want a double, do a bit convert.
if (ObjectVT == EVT::f64)
WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f64, WholeValue);
if (ObjectVT == MVT::f64)
WholeValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, WholeValue);
InVals.push_back(WholeValue);
}
@ -225,10 +225,10 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, EVT::i32);
SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
int FrameIdx = MF.getFrameInfo()->CreateFixedObject(4, ArgOffset);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, EVT::i32);
SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
OutChains.push_back(DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, NULL, 0));
ArgOffset += 4;
@ -236,7 +236,7 @@ SparcTargetLowering::LowerFormalArguments(SDValue Chain,
if (!OutChains.empty()) {
OutChains.push_back(Chain);
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&OutChains[0], OutChains.size());
}
}
@ -268,17 +268,17 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Count the size of the outgoing arguments.
unsigned ArgsSize = 0;
for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
switch (Outs[i].Val.getValueType().getSimpleVT()) {
switch (Outs[i].Val.getValueType().getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unknown value type!");
case EVT::i1:
case EVT::i8:
case EVT::i16:
case EVT::i32:
case EVT::f32:
case MVT::i1:
case MVT::i8:
case MVT::i16:
case MVT::i32:
case MVT::f32:
ArgsSize += 4;
break;
case EVT::i64:
case EVT::f64:
case MVT::i64:
case MVT::f64:
ArgsSize += 8;
break;
}
@ -328,10 +328,10 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
assert(VA.isMemLoc());
// Create a store off the stack pointer for this argument.
SDValue StackPtr = DAG.getRegister(SP::O6, EVT::i32);
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
// FIXME: VERIFY THAT 68 IS RIGHT.
SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset()+68);
PtrOff = DAG.getNode(ISD::ADD, EVT::i32, StackPtr, PtrOff);
PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
}
@ -346,9 +346,9 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
EVT ObjectVT = Val.getValueType();
SDValue ValToStore(0, 0);
unsigned ObjSize;
switch (ObjectVT.getSimpleVT()) {
switch (ObjectVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unhandled argument type!");
case EVT::i32:
case MVT::i32:
ObjSize = 4;
if (RegsToPass.size() >= 6) {
@ -357,17 +357,17 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
}
break;
case EVT::f32:
case MVT::f32:
ObjSize = 4;
if (RegsToPass.size() >= 6) {
ValToStore = Val;
} else {
// Convert this to a FP value in an int reg.
Val = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32, Val);
Val = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Val);
RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Val));
}
break;
case EVT::f64: {
case MVT::f64: {
ObjSize = 8;
if (RegsToPass.size() >= 6) {
ValToStore = Val; // Whole thing is passed in memory.
@ -376,16 +376,16 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Break into top and bottom parts by storing to the stack and loading
// out the parts as integers. Top part goes in a reg.
SDValue StackPtr = DAG.CreateStackTemporary(EVT::f64, EVT::i32);
SDValue StackPtr = DAG.CreateStackTemporary(MVT::f64, MVT::i32);
SDValue Store = DAG.getStore(DAG.getEntryNode(), dl,
Val, StackPtr, NULL, 0);
// Sparc is big-endian, so the high part comes first.
SDValue Hi = DAG.getLoad(EVT::i32, dl, Store, StackPtr, NULL, 0, 0);
SDValue Hi = DAG.getLoad(MVT::i32, dl, Store, StackPtr, NULL, 0, 0);
// Increment the pointer to the other half.
StackPtr = DAG.getNode(ISD::ADD, dl, StackPtr.getValueType(), StackPtr,
DAG.getIntPtrConstant(4));
// Load the low part.
SDValue Lo = DAG.getLoad(EVT::i32, dl, Store, StackPtr, NULL, 0, 0);
SDValue Lo = DAG.getLoad(MVT::i32, dl, Store, StackPtr, NULL, 0, 0);
RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Hi));
@ -398,7 +398,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
}
break;
}
case EVT::i64: {
case MVT::i64: {
ObjSize = 8;
if (RegsToPass.size() >= 6) {
ValToStore = Val; // Whole thing is passed in memory.
@ -406,10 +406,10 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
}
// Split the value into top and bottom parts. The top part goes in a reg.
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Val,
DAG.getConstant(1, EVT::i32));
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32, Val,
DAG.getConstant(0, EVT::i32));
SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Val,
DAG.getConstant(1, MVT::i32));
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Val,
DAG.getConstant(0, MVT::i32));
RegsToPass.push_back(std::make_pair(ArgRegs[RegsToPass.size()], Hi));
if (RegsToPass.size() >= 6) {
@ -424,9 +424,9 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
}
if (ValToStore.getNode()) {
SDValue StackPtr = DAG.getRegister(SP::O6, EVT::i32);
SDValue PtrOff = DAG.getConstant(ArgOffset, EVT::i32);
PtrOff = DAG.getNode(ISD::ADD, dl, EVT::i32, StackPtr, PtrOff);
SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
SDValue PtrOff = DAG.getConstant(ArgOffset, MVT::i32);
PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
MemOpChains.push_back(DAG.getStore(Chain, dl, ValToStore,
PtrOff, NULL, 0));
}
@ -436,7 +436,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// Emit all stores, make sure they occur before any copies into physregs.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token
@ -458,13 +458,13 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i32);
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), EVT::i32);
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
std::vector<EVT> NodeTys;
NodeTys.push_back(EVT::Other); // Returns a chain
NodeTys.push_back(EVT::Flag); // Returns a flag for retval copy to use.
NodeTys.push_back(MVT::Other); // Returns a chain
NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
SDValue Ops[] = { Chain, Callee, InFlag };
Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops, InFlag.getNode() ? 3 : 2);
InFlag = Chain.getValue(1);
@ -553,120 +553,120 @@ SparcTargetLowering::SparcTargetLowering(TargetMachine &TM)
: TargetLowering(TM, new TargetLoweringObjectFileELF()) {
// Set up the register classes.
addRegisterClass(EVT::i32, SP::IntRegsRegisterClass);
addRegisterClass(EVT::f32, SP::FPRegsRegisterClass);
addRegisterClass(EVT::f64, SP::DFPRegsRegisterClass);
addRegisterClass(MVT::i32, SP::IntRegsRegisterClass);
addRegisterClass(MVT::f32, SP::FPRegsRegisterClass);
addRegisterClass(MVT::f64, SP::DFPRegsRegisterClass);
// Turn FP extload into load/fextend
setLoadExtAction(ISD::EXTLOAD, EVT::f32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
// Sparc doesn't have i1 sign extending load
setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
// Turn FP truncstore into trunc + store.
setTruncStoreAction(EVT::f64, EVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
// Custom legalize GlobalAddress nodes into LO/HI parts.
setOperationAction(ISD::GlobalAddress, EVT::i32, Custom);
setOperationAction(ISD::GlobalTLSAddress, EVT::i32, Custom);
setOperationAction(ISD::ConstantPool , EVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool , MVT::i32, Custom);
// Sparc doesn't have sext_inreg, replace them with shl/sra
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i16, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i8 , Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1 , Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
// Sparc has no REM or DIVREM operations.
setOperationAction(ISD::UREM, EVT::i32, Expand);
setOperationAction(ISD::SREM, EVT::i32, Expand);
setOperationAction(ISD::SDIVREM, EVT::i32, Expand);
setOperationAction(ISD::UDIVREM, EVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
// Custom expand fp<->sint
setOperationAction(ISD::FP_TO_SINT, EVT::i32, Custom);
setOperationAction(ISD::SINT_TO_FP, EVT::i32, Custom);
setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
// Expand fp<->uint
setOperationAction(ISD::FP_TO_UINT, EVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, EVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::BIT_CONVERT, EVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, EVT::i32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
// Sparc has no select or setcc: expand to SELECT_CC.
setOperationAction(ISD::SELECT, EVT::i32, Expand);
setOperationAction(ISD::SELECT, EVT::f32, Expand);
setOperationAction(ISD::SELECT, EVT::f64, Expand);
setOperationAction(ISD::SETCC, EVT::i32, Expand);
setOperationAction(ISD::SETCC, EVT::f32, Expand);
setOperationAction(ISD::SETCC, EVT::f64, Expand);
setOperationAction(ISD::SELECT, MVT::i32, Expand);
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f64, Expand);
setOperationAction(ISD::SETCC, MVT::i32, Expand);
setOperationAction(ISD::SETCC, MVT::f32, Expand);
setOperationAction(ISD::SETCC, MVT::f64, Expand);
// Sparc doesn't have BRCOND either, it has BR_CC.
setOperationAction(ISD::BRCOND, EVT::Other, Expand);
setOperationAction(ISD::BRIND, EVT::Other, Expand);
setOperationAction(ISD::BR_JT, EVT::Other, Expand);
setOperationAction(ISD::BR_CC, EVT::i32, Custom);
setOperationAction(ISD::BR_CC, EVT::f32, Custom);
setOperationAction(ISD::BR_CC, EVT::f64, Custom);
setOperationAction(ISD::BRCOND, MVT::Other, Expand);
setOperationAction(ISD::BRIND, MVT::Other, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Custom);
setOperationAction(ISD::BR_CC, MVT::f32, Custom);
setOperationAction(ISD::BR_CC, MVT::f64, Custom);
setOperationAction(ISD::SELECT_CC, EVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, EVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, EVT::f64, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
// SPARC has no intrinsics for these particular operations.
setOperationAction(ISD::MEMBARRIER, EVT::Other, Expand);
setOperationAction(ISD::MEMBARRIER, MVT::Other, Expand);
setOperationAction(ISD::FSIN , EVT::f64, Expand);
setOperationAction(ISD::FCOS , EVT::f64, Expand);
setOperationAction(ISD::FREM , EVT::f64, Expand);
setOperationAction(ISD::FSIN , EVT::f32, Expand);
setOperationAction(ISD::FCOS , EVT::f32, Expand);
setOperationAction(ISD::FREM , EVT::f32, Expand);
setOperationAction(ISD::CTPOP, EVT::i32, Expand);
setOperationAction(ISD::CTTZ , EVT::i32, Expand);
setOperationAction(ISD::CTLZ , EVT::i32, Expand);
setOperationAction(ISD::ROTL , EVT::i32, Expand);
setOperationAction(ISD::ROTR , EVT::i32, Expand);
setOperationAction(ISD::BSWAP, EVT::i32, Expand);
setOperationAction(ISD::FCOPYSIGN, EVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, EVT::f32, Expand);
setOperationAction(ISD::FPOW , EVT::f64, Expand);
setOperationAction(ISD::FPOW , EVT::f32, Expand);
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FREM , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FREM , MVT::f32, Expand);
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::CTTZ , MVT::i32, Expand);
setOperationAction(ISD::CTLZ , MVT::i32, Expand);
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
setOperationAction(ISD::BSWAP, MVT::i32, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
setOperationAction(ISD::FPOW , MVT::f64, Expand);
setOperationAction(ISD::FPOW , MVT::f32, Expand);
setOperationAction(ISD::SHL_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
// FIXME: Sparc provides these multiplies, but we don't have them yet.
setOperationAction(ISD::UMUL_LOHI, EVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, EVT::i32, Expand);
setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
// We don't have line number support yet.
setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex.
setOperationAction(ISD::VASTART , EVT::Other, Custom);
setOperationAction(ISD::VASTART , MVT::Other, Custom);
// VAARG needs to be lowered to not do unaligned accesses for doubles.
setOperationAction(ISD::VAARG , EVT::Other, Custom);
setOperationAction(ISD::VAARG , MVT::Other, Custom);
// Use the default implementation.
setOperationAction(ISD::VACOPY , EVT::Other, Expand);
setOperationAction(ISD::VAEND , EVT::Other, Expand);
setOperationAction(ISD::STACKSAVE , EVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE , EVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32 , Custom);
setOperationAction(ISD::VACOPY , MVT::Other, Expand);
setOperationAction(ISD::VAEND , MVT::Other, Expand);
setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
// No debug info support yet.
setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, EVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, EVT::Other, Expand);
setOperationAction(ISD::DECLARE, EVT::Other, Expand);
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DBG_LABEL, MVT::Other, Expand);
setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
setOperationAction(ISD::DECLARE, MVT::Other, Expand);
setStackPointerRegisterToSaveRestore(SP::O6);
if (TM.getSubtarget<SparcSubtarget>().isV9())
setOperationAction(ISD::CTPOP, EVT::i32, Legal);
setOperationAction(ISD::CTPOP, MVT::i32, Legal);
computeRegisterProperties();
}
@ -745,10 +745,10 @@ static SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) {
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
// FIXME there isn't really any debug info here
DebugLoc dl = Op.getDebugLoc();
SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i32);
SDValue Hi = DAG.getNode(SPISD::Hi, dl, EVT::i32, GA);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, EVT::i32, GA);
return DAG.getNode(ISD::ADD, dl, EVT::i32, Lo, Hi);
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, GA);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, GA);
return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
}
static SDValue LowerCONSTANTPOOL(SDValue Op, SelectionDAG &DAG) {
@ -756,24 +756,24 @@ static SDValue LowerCONSTANTPOOL(SDValue Op, SelectionDAG &DAG) {
// FIXME there isn't really any debug info here
DebugLoc dl = Op.getDebugLoc();
Constant *C = N->getConstVal();
SDValue CP = DAG.getTargetConstantPool(C, EVT::i32, N->getAlignment());
SDValue Hi = DAG.getNode(SPISD::Hi, dl, EVT::i32, CP);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, EVT::i32, CP);
return DAG.getNode(ISD::ADD, dl, EVT::i32, Lo, Hi);
SDValue CP = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment());
SDValue Hi = DAG.getNode(SPISD::Hi, dl, MVT::i32, CP);
SDValue Lo = DAG.getNode(SPISD::Lo, dl, MVT::i32, CP);
return DAG.getNode(ISD::ADD, dl, MVT::i32, Lo, Hi);
}
static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
// Convert the fp value to integer in an FP register.
assert(Op.getValueType() == EVT::i32);
Op = DAG.getNode(SPISD::FTOI, dl, EVT::f32, Op.getOperand(0));
return DAG.getNode(ISD::BIT_CONVERT, dl, EVT::i32, Op);
assert(Op.getValueType() == MVT::i32);
Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
return DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i32, Op);
}
static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
assert(Op.getOperand(0).getValueType() == EVT::i32);
SDValue Tmp = DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f32, Op.getOperand(0));
assert(Op.getOperand(0).getValueType() == MVT::i32);
SDValue Tmp = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, Op.getOperand(0));
// Convert the int value to FP in an FP register.
return DAG.getNode(SPISD::ITOF, dl, Op.getValueType(), Tmp);
}
@ -793,21 +793,21 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) {
// Get the condition flag.
SDValue CompareFlag;
if (LHS.getValueType() == EVT::i32) {
if (LHS.getValueType() == MVT::i32) {
std::vector<EVT> VTs;
VTs.push_back(EVT::i32);
VTs.push_back(EVT::Flag);
VTs.push_back(MVT::i32);
VTs.push_back(MVT::Flag);
SDValue Ops[2] = { LHS, RHS };
CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
Opc = SPISD::BRICC;
} else {
CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, EVT::Flag, LHS, RHS);
CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Flag, LHS, RHS);
if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
Opc = SPISD::BRFCC;
}
return DAG.getNode(Opc, dl, EVT::Other, Chain, Dest,
DAG.getConstant(SPCC, EVT::i32), CompareFlag);
return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
DAG.getConstant(SPCC, MVT::i32), CompareFlag);
}
static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
@ -824,21 +824,21 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
LookThroughSetCC(LHS, RHS, CC, SPCC);
SDValue CompareFlag;
if (LHS.getValueType() == EVT::i32) {
if (LHS.getValueType() == MVT::i32) {
std::vector<EVT> VTs;
VTs.push_back(LHS.getValueType()); // subcc returns a value
VTs.push_back(EVT::Flag);
VTs.push_back(MVT::Flag);
SDValue Ops[2] = { LHS, RHS };
CompareFlag = DAG.getNode(SPISD::CMPICC, dl, VTs, Ops, 2).getValue(1);
Opc = SPISD::SELECT_ICC;
if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
} else {
CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, EVT::Flag, LHS, RHS);
CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Flag, LHS, RHS);
Opc = SPISD::SELECT_FCC;
if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
}
return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
DAG.getConstant(SPCC, EVT::i32), CompareFlag);
DAG.getConstant(SPCC, MVT::i32), CompareFlag);
}
static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
@ -846,10 +846,10 @@ static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
// vastart just stores the address of the VarArgsFrameIndex slot into the
// memory location argument.
DebugLoc dl = Op.getDebugLoc();
SDValue Offset = DAG.getNode(ISD::ADD, dl, EVT::i32,
DAG.getRegister(SP::I6, EVT::i32),
SDValue Offset = DAG.getNode(ISD::ADD, dl, MVT::i32,
DAG.getRegister(SP::I6, MVT::i32),
DAG.getConstant(TLI.getVarArgsFrameOffset(),
EVT::i32));
MVT::i32));
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), dl, Offset, Op.getOperand(1), SV, 0);
}
@ -861,25 +861,25 @@ static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
SDValue VAListPtr = Node->getOperand(1);
const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
DebugLoc dl = Node->getDebugLoc();
SDValue VAList = DAG.getLoad(EVT::i32, dl, InChain, VAListPtr, SV, 0);
SDValue VAList = DAG.getLoad(MVT::i32, dl, InChain, VAListPtr, SV, 0);
// Increment the pointer, VAList, to the next vaarg
SDValue NextPtr = DAG.getNode(ISD::ADD, dl, EVT::i32, VAList,
SDValue NextPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, VAList,
DAG.getConstant(VT.getSizeInBits()/8,
EVT::i32));
MVT::i32));
// Store the incremented VAList to the legalized pointer
InChain = DAG.getStore(VAList.getValue(1), dl, NextPtr,
VAListPtr, SV, 0);
// Load the actual argument out of the pointer VAList, unless this is an
// f64 load.
if (VT != EVT::f64)
if (VT != MVT::f64)
return DAG.getLoad(VT, dl, InChain, VAList, NULL, 0);
// Otherwise, load it as i64, then do a bitconvert.
SDValue V = DAG.getLoad(EVT::i64, dl, InChain, VAList, NULL, 0);
SDValue V = DAG.getLoad(MVT::i64, dl, InChain, VAList, NULL, 0);
// Bit-Convert the value to f64.
SDValue Ops[2] = {
DAG.getNode(ISD::BIT_CONVERT, dl, EVT::f64, V),
DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f64, V),
V.getValue(1)
};
return DAG.getMergeValues(Ops, 2, dl);
@ -891,14 +891,14 @@ static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();
unsigned SPReg = SP::O6;
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, EVT::i32);
SDValue NewSP = DAG.getNode(ISD::SUB, dl, EVT::i32, SP, Size); // Value
SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
SDValue NewSP = DAG.getNode(ISD::SUB, dl, MVT::i32, SP, Size); // Value
Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
// The resultant pointer is actually 16 words from the bottom of the stack,
// to provide a register spill area.
SDValue NewVal = DAG.getNode(ISD::ADD, dl, EVT::i32, NewSP,
DAG.getConstant(96, EVT::i32));
SDValue NewVal = DAG.getNode(ISD::ADD, dl, MVT::i32, NewSP,
DAG.getConstant(96, MVT::i32));
SDValue Ops[2] = { NewVal, Chain };
return DAG.getMergeValues(Ops, 2, dl);
}

View File

@ -57,12 +57,12 @@ def simm13 : PatLeaf<(imm), [{
def LO10 : SDNodeXForm<imm, [{
return CurDAG->getTargetConstant((unsigned)N->getZExtValue() & 1023,
EVT::i32);
MVT::i32);
}]>;
def HI22 : SDNodeXForm<imm, [{
// Transformation function: shift the immediate value down into the low bits.
return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 10, EVT::i32);
return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 10, MVT::i32);
}]>;
def SETHIimm : PatLeaf<(imm), [{
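LO10 keeps the low 10 bits of an immediate and HI22 keeps the remaining upper 22 bits, matching SPARC's sethi/or idiom for building 32-bit constants; combining the two transforms reproduces the original value. A small worked check (the constant is illustrative):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t V  = 0x12345678;
  uint32_t lo = V & 1023;          // LO10: low 10 bits   -> 0x278
  uint32_t hi = V >> 10;           // HI22: upper 22 bits -> 0x48D15
  assert(((hi << 10) | lo) == V);  // sethi %hi(V), reg; or reg, %lo(V), reg
  return 0;
}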

View File

@ -107,19 +107,19 @@ namespace {
/// getI8Imm - Return a target constant with the specified value, of type
/// i8.
inline SDValue getI8Imm(uint64_t Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i8);
return CurDAG->getTargetConstant(Imm, MVT::i8);
}
/// getI16Imm - Return a target constant with the specified value, of type
/// i16.
inline SDValue getI16Imm(uint64_t Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i16);
return CurDAG->getTargetConstant(Imm, MVT::i16);
}
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
inline SDValue getI32Imm(uint64_t Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i32);
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
// Include the pieces autogenerated from the target description.
@ -353,7 +353,7 @@ void SystemZDAGToDAGISel::getAddressOperandsRI(const SystemZRRIAddressMode &AM,
Base = AM.Base.Reg;
else
Base = CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy());
Disp = CurDAG->getTargetConstant(AM.Disp, EVT::i64);
Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i64);
}
void SystemZDAGToDAGISel::getAddressOperands(const SystemZRRIAddressMode &AM,
@ -650,16 +650,16 @@ SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
EVT ResVT;
bool is32Bit = false;
switch (NVT.getSimpleVT()) {
switch (NVT.getSimpleVT().SimpleTy) {
default: assert(0 && "Unsupported VT!");
case EVT::i32:
case MVT::i32:
Opc = SystemZ::SDIVREM32r; MOpc = SystemZ::SDIVREM32m;
ResVT = EVT::v2i64;
ResVT = MVT::v2i64;
is32Bit = true;
break;
case EVT::i64:
case MVT::i64:
Opc = SystemZ::SDIVREM64r; MOpc = SystemZ::SDIVREM64m;
ResVT = EVT::v2i64;
ResVT = MVT::v2i64;
break;
}
@ -669,7 +669,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
// Prepare the dividend
SDNode *Dividend;
if (is32Bit)
Dividend = CurDAG->getTargetNode(SystemZ::MOVSX64rr32, dl, EVT::i64, N0);
Dividend = CurDAG->getTargetNode(SystemZ::MOVSX64rr32, dl, MVT::i64, N0);
else
Dividend = N0.getNode();
@ -679,7 +679,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
Dividend =
CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, ResVT,
SDValue(Tmp, 0), SDValue(Dividend, 0),
CurDAG->getTargetConstant(subreg_odd, EVT::i32));
CurDAG->getTargetConstant(subreg_odd, MVT::i32));
SDNode *Result;
SDValue DivVal = SDValue(Dividend, 0);
@ -699,7 +699,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
dl, NVT,
SDValue(Result, 0),
CurDAG->getTargetConstant(SubRegIdx,
EVT::i32));
MVT::i32));
ReplaceUses(Op.getValue(0), SDValue(Div, 0));
#ifndef NDEBUG
@ -716,7 +716,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
dl, NVT,
SDValue(Result, 0),
CurDAG->getTargetConstant(SubRegIdx,
EVT::i32));
MVT::i32));
ReplaceUses(Op.getValue(1), SDValue(Rem, 0));
#ifndef NDEBUG
@ -739,18 +739,18 @@ SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
EVT ResVT;
bool is32Bit = false;
switch (NVT.getSimpleVT()) {
switch (NVT.getSimpleVT().SimpleTy) {
default: assert(0 && "Unsupported VT!");
case EVT::i32:
case MVT::i32:
Opc = SystemZ::UDIVREM32r; MOpc = SystemZ::UDIVREM32m;
ClrOpc = SystemZ::MOV64Pr0_even;
ResVT = EVT::v2i32;
ResVT = MVT::v2i32;
is32Bit = true;
break;
case EVT::i64:
case MVT::i64:
Opc = SystemZ::UDIVREM64r; MOpc = SystemZ::UDIVREM64m;
ClrOpc = SystemZ::MOV128r0_even;
ResVT = EVT::v2i64;
ResVT = MVT::v2i64;
break;
}
@ -768,7 +768,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
Dividend =
CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl, ResVT,
SDValue(Tmp, 0), SDValue(Dividend, 0),
CurDAG->getTargetConstant(SubRegIdx, EVT::i32));
CurDAG->getTargetConstant(SubRegIdx, MVT::i32));
}
// Zero out even subreg
@ -793,7 +793,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
dl, NVT,
SDValue(Result, 0),
CurDAG->getTargetConstant(SubRegIdx,
EVT::i32));
MVT::i32));
ReplaceUses(Op.getValue(0), SDValue(Div, 0));
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";
@ -809,7 +809,7 @@ SDNode *SystemZDAGToDAGISel::Select(SDValue Op) {
dl, NVT,
SDValue(Result, 0),
CurDAG->getTargetConstant(SubRegIdx,
EVT::i32));
MVT::i32));
ReplaceUses(Op.getValue(1), SDValue(Rem, 0));
#ifndef NDEBUG
DOUT << std::string(Indent-2, ' ') << "=> ";

View File

@ -44,14 +44,14 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
RegInfo = TM.getRegisterInfo();
// Set up the register classes.
addRegisterClass(EVT::i32, SystemZ::GR32RegisterClass);
addRegisterClass(EVT::i64, SystemZ::GR64RegisterClass);
addRegisterClass(EVT::v2i32,SystemZ::GR64PRegisterClass);
addRegisterClass(EVT::v2i64,SystemZ::GR128RegisterClass);
addRegisterClass(MVT::i32, SystemZ::GR32RegisterClass);
addRegisterClass(MVT::i64, SystemZ::GR64RegisterClass);
addRegisterClass(MVT::v2i32,SystemZ::GR64PRegisterClass);
addRegisterClass(MVT::v2i64,SystemZ::GR128RegisterClass);
if (!UseSoftFloat) {
addRegisterClass(EVT::f32, SystemZ::FP32RegisterClass);
addRegisterClass(EVT::f64, SystemZ::FP64RegisterClass);
addRegisterClass(MVT::f32, SystemZ::FP32RegisterClass);
addRegisterClass(MVT::f64, SystemZ::FP64RegisterClass);
addLegalFPImmediate(APFloat(+0.0)); // lzer
addLegalFPImmediate(APFloat(+0.0f)); // lzdr
@ -63,92 +63,92 @@ SystemZTargetLowering::SystemZTargetLowering(SystemZTargetMachine &tm) :
computeRegisterProperties();
// Set shifts properties
setShiftAmountType(EVT::i64);
setShiftAmountType(MVT::i64);
// Provide all sorts of operation actions
setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::EXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, EVT::f32, Expand);
setLoadExtAction(ISD::ZEXTLOAD, EVT::f32, Expand);
setLoadExtAction(ISD::EXTLOAD, EVT::f32, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::f32, Expand);
setLoadExtAction(ISD::ZEXTLOAD, MVT::f32, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
setLoadExtAction(ISD::SEXTLOAD, EVT::f64, Expand);
setLoadExtAction(ISD::ZEXTLOAD, EVT::f64, Expand);
setLoadExtAction(ISD::EXTLOAD, EVT::f64, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::f64, Expand);
setLoadExtAction(ISD::ZEXTLOAD, MVT::f64, Expand);
setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
setStackPointerRegisterToSaveRestore(SystemZ::R15D);
setSchedulingPreference(SchedulingForLatency);
setBooleanContents(ZeroOrOneBooleanContent);
setOperationAction(ISD::BR_JT, EVT::Other, Expand);
setOperationAction(ISD::BRCOND, EVT::Other, Expand);
setOperationAction(ISD::BR_CC, EVT::i32, Custom);
setOperationAction(ISD::BR_CC, EVT::i64, Custom);
setOperationAction(ISD::BR_CC, EVT::f32, Custom);
setOperationAction(ISD::BR_CC, EVT::f64, Custom);
setOperationAction(ISD::ConstantPool, EVT::i32, Custom);
setOperationAction(ISD::ConstantPool, EVT::i64, Custom);
setOperationAction(ISD::GlobalAddress, EVT::i64, Custom);
setOperationAction(ISD::JumpTable, EVT::i64, Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i64, Expand);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::BRCOND, MVT::Other, Expand);
setOperationAction(ISD::BR_CC, MVT::i32, Custom);
setOperationAction(ISD::BR_CC, MVT::i64, Custom);
setOperationAction(ISD::BR_CC, MVT::f32, Custom);
setOperationAction(ISD::BR_CC, MVT::f64, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
setOperationAction(ISD::JumpTable, MVT::i64, Custom);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
setOperationAction(ISD::SDIV, EVT::i32, Expand);
setOperationAction(ISD::UDIV, EVT::i32, Expand);
setOperationAction(ISD::SDIV, EVT::i64, Expand);
setOperationAction(ISD::UDIV, EVT::i64, Expand);
setOperationAction(ISD::SREM, EVT::i32, Expand);
setOperationAction(ISD::UREM, EVT::i32, Expand);
setOperationAction(ISD::SREM, EVT::i64, Expand);
setOperationAction(ISD::UREM, EVT::i64, Expand);
setOperationAction(ISD::SDIV, MVT::i32, Expand);
setOperationAction(ISD::UDIV, MVT::i32, Expand);
setOperationAction(ISD::SDIV, MVT::i64, Expand);
setOperationAction(ISD::UDIV, MVT::i64, Expand);
setOperationAction(ISD::SREM, MVT::i32, Expand);
setOperationAction(ISD::UREM, MVT::i32, Expand);
setOperationAction(ISD::SREM, MVT::i64, Expand);
setOperationAction(ISD::UREM, MVT::i64, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, EVT::i1, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
setOperationAction(ISD::CTPOP, EVT::i32, Expand);
setOperationAction(ISD::CTPOP, EVT::i64, Expand);
setOperationAction(ISD::CTTZ, EVT::i32, Expand);
setOperationAction(ISD::CTTZ, EVT::i64, Expand);
setOperationAction(ISD::CTLZ, EVT::i32, Promote);
setOperationAction(ISD::CTLZ, EVT::i64, Legal);
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::CTPOP, MVT::i64, Expand);
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i64, Expand);
setOperationAction(ISD::CTLZ, MVT::i32, Promote);
setOperationAction(ISD::CTLZ, MVT::i64, Legal);
// FIXME: Can we lower these 2 efficiently?
setOperationAction(ISD::SETCC, EVT::i32, Expand);
setOperationAction(ISD::SETCC, EVT::i64, Expand);
setOperationAction(ISD::SETCC, EVT::f32, Expand);
setOperationAction(ISD::SETCC, EVT::f64, Expand);
setOperationAction(ISD::SELECT, EVT::i32, Expand);
setOperationAction(ISD::SELECT, EVT::i64, Expand);
setOperationAction(ISD::SELECT, EVT::f32, Expand);
setOperationAction(ISD::SELECT, EVT::f64, Expand);
setOperationAction(ISD::SELECT_CC, EVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, EVT::i64, Custom);
setOperationAction(ISD::SELECT_CC, EVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, EVT::f64, Custom);
setOperationAction(ISD::SETCC, MVT::i32, Expand);
setOperationAction(ISD::SETCC, MVT::i64, Expand);
setOperationAction(ISD::SETCC, MVT::f32, Expand);
setOperationAction(ISD::SETCC, MVT::f64, Expand);
setOperationAction(ISD::SELECT, MVT::i32, Expand);
setOperationAction(ISD::SELECT, MVT::i64, Expand);
setOperationAction(ISD::SELECT, MVT::f32, Expand);
setOperationAction(ISD::SELECT, MVT::f64, Expand);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
// Funny enough: we don't have 64-bit signed versions of these operations, but we
// do have unsigned ones.
setOperationAction(ISD::MULHS, EVT::i64, Expand);
setOperationAction(ISD::SMUL_LOHI, EVT::i64, Expand);
setOperationAction(ISD::MULHS, MVT::i64, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
// Lower some FP stuff
setOperationAction(ISD::FSIN, EVT::f32, Expand);
setOperationAction(ISD::FSIN, EVT::f64, Expand);
setOperationAction(ISD::FCOS, EVT::f32, Expand);
setOperationAction(ISD::FCOS, EVT::f64, Expand);
setOperationAction(ISD::FREM, EVT::f32, Expand);
setOperationAction(ISD::FREM, EVT::f64, Expand);
setOperationAction(ISD::FSIN, MVT::f32, Expand);
setOperationAction(ISD::FSIN, MVT::f64, Expand);
setOperationAction(ISD::FCOS, MVT::f32, Expand);
setOperationAction(ISD::FCOS, MVT::f64, Expand);
setOperationAction(ISD::FREM, MVT::f32, Expand);
setOperationAction(ISD::FREM, MVT::f64, Expand);
// We have only 64-bit bitconverts
setOperationAction(ISD::BIT_CONVERT, EVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, EVT::i32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::f32, Expand);
setOperationAction(ISD::BIT_CONVERT, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, EVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, EVT::i64, Expand);
setOperationAction(ISD::FP_TO_UINT, EVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, EVT::i64, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
setTruncStoreAction(EVT::f64, EVT::f32, Expand);
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
}
SDValue SystemZTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
@ -242,21 +242,21 @@ SystemZTargetLowering::LowerCCCArguments(SDValue Chain,
if (VA.isRegLoc()) {
// Arguments passed in registers
TargetRegisterClass *RC;
switch (LocVT.getSimpleVT()) {
switch (LocVT.getSimpleVT().SimpleTy) {
default:
#ifndef NDEBUG
cerr << "LowerFormalArguments Unhandled argument type: "
<< LocVT.getSimpleVT()
<< LocVT.getSimpleVT().SimpleTy
<< "\n";
#endif
llvm_unreachable(0);
case EVT::i64:
case MVT::i64:
RC = SystemZ::GR64RegisterClass;
break;
case EVT::f32:
case MVT::f32:
RC = SystemZ::FP32RegisterClass;
break;
case EVT::f64:
case MVT::f64:
RC = SystemZ::FP64RegisterClass;
break;
}
@ -382,7 +382,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// Transform all store nodes into one single node because all store nodes are
// independent of each other.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token chain and
@ -404,7 +404,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), getPointerTy());
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@ -530,10 +530,10 @@ SystemZTargetLowering::LowerReturn(SDValue Chain,
}
if (Flag.getNode())
return DAG.getNode(SystemZISD::RET_FLAG, dl, EVT::Other, Chain, Flag);
return DAG.getNode(SystemZISD::RET_FLAG, dl, MVT::Other, Chain, Flag);
// Return Void
return DAG.getNode(SystemZISD::RET_FLAG, dl, EVT::Other, Chain);
return DAG.getNode(SystemZISD::RET_FLAG, dl, MVT::Other, Chain);
}
SDValue SystemZTargetLowering::EmitCmp(SDValue LHS, SDValue RHS,
@ -608,11 +608,11 @@ SDValue SystemZTargetLowering::EmitCmp(SDValue LHS, SDValue RHS,
break;
}
SystemZCC = DAG.getConstant(TCC, EVT::i32);
SystemZCC = DAG.getConstant(TCC, MVT::i32);
DebugLoc dl = LHS.getDebugLoc();
return DAG.getNode((isUnsigned ? SystemZISD::UCMP : SystemZISD::CMP),
dl, EVT::Flag, LHS, RHS);
dl, MVT::Flag, LHS, RHS);
}
@ -641,7 +641,7 @@ SDValue SystemZTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) {
SDValue SystemZCC;
SDValue Flag = EmitCmp(LHS, RHS, CC, SystemZCC, DAG);
SDVTList VTs = DAG.getVTList(Op.getValueType(), EVT::Flag);
SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
SmallVector<SDValue, 4> Ops;
Ops.push_back(TrueV);
Ops.push_back(FalseV);

View File

@ -134,10 +134,10 @@ def i64hh16c : PatLeaf<(i64 imm), [{
def immSExt16 : PatLeaf<(imm), [{
// immSExt16 predicate - true if the immediate fits in a 16-bit sign extended
// field.
if (N->getValueType(0) == EVT::i64) {
if (N->getValueType(0) == MVT::i64) {
uint64_t val = N->getZExtValue();
return ((int64_t)val == (int16_t)val);
} else if (N->getValueType(0) == EVT::i32) {
} else if (N->getValueType(0) == MVT::i32) {
uint32_t val = N->getZExtValue();
return ((int32_t)val == (int16_t)val);
}

View File

@ -51,7 +51,7 @@ TargetRegisterInfo::getPhysicalRegisterRegClass(unsigned reg, EVT VT) const {
const TargetRegisterClass* BestRC = 0;
for (regclass_iterator I = regclass_begin(), E = regclass_end(); I != E; ++I){
const TargetRegisterClass* RC = *I;
if ((VT == EVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
if ((VT == MVT::Other || RC->hasType(VT)) && RC->contains(reg) &&
(!BestRC || BestRC->hasSuperClass(RC)))
BestRC = RC;
}

View File

@ -426,8 +426,8 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
unsigned Reg = MO.getReg();
if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
EVT VT = (strcmp(Modifier+6,"64") == 0) ?
EVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? EVT::i32 :
((strcmp(Modifier+6,"16") == 0) ? EVT::i16 : EVT::i8));
MVT::i64 : ((strcmp(Modifier+6, "32") == 0) ? MVT::i32 :
((strcmp(Modifier+6,"16") == 0) ? MVT::i16 : MVT::i8));
Reg = getX86SubSuperRegister(Reg, VT);
}
O << TRI->getAsmName(Reg);
@ -573,19 +573,19 @@ bool X86ATTAsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode) {
switch (Mode) {
default: return true; // Unknown mode.
case 'b': // Print QImode register
Reg = getX86SubSuperRegister(Reg, EVT::i8);
Reg = getX86SubSuperRegister(Reg, MVT::i8);
break;
case 'h': // Print QImode high register
Reg = getX86SubSuperRegister(Reg, EVT::i8, true);
Reg = getX86SubSuperRegister(Reg, MVT::i8, true);
break;
case 'w': // Print HImode register
Reg = getX86SubSuperRegister(Reg, EVT::i16);
Reg = getX86SubSuperRegister(Reg, MVT::i16);
break;
case 'k': // Print SImode register
Reg = getX86SubSuperRegister(Reg, EVT::i32);
Reg = getX86SubSuperRegister(Reg, MVT::i32);
break;
case 'q': // Print DImode register
Reg = getX86SubSuperRegister(Reg, EVT::i64);
Reg = getX86SubSuperRegister(Reg, MVT::i64);
break;
}
@ -685,7 +685,7 @@ static void lower_lea64_32mem(MCInst *MI, unsigned OpNo) {
unsigned Reg = MI->getOperand(i).getReg();
if (Reg == 0) continue;
MI->getOperand(i).setReg(getX86SubSuperRegister(Reg, EVT::i64));
MI->getOperand(i).setReg(getX86SubSuperRegister(Reg, MVT::i64));
}
}

View File

@ -210,8 +210,8 @@ void X86IntelAsmPrinter::printOp(const MachineOperand &MO,
unsigned Reg = MO.getReg();
if (Modifier && strncmp(Modifier, "subreg", strlen("subreg")) == 0) {
EVT VT = (strcmp(Modifier,"subreg64") == 0) ?
EVT::i64 : ((strcmp(Modifier, "subreg32") == 0) ? EVT::i32 :
((strcmp(Modifier,"subreg16") == 0) ? EVT::i16 :EVT::i8));
MVT::i64 : ((strcmp(Modifier, "subreg32") == 0) ? MVT::i32 :
((strcmp(Modifier,"subreg16") == 0) ? MVT::i16 :MVT::i8));
Reg = getX86SubSuperRegister(Reg, VT);
}
O << TRI->getName(Reg);
@ -376,16 +376,16 @@ bool X86IntelAsmPrinter::printAsmMRegister(const MachineOperand &MO,
switch (Mode) {
default: return true; // Unknown mode.
case 'b': // Print QImode register
Reg = getX86SubSuperRegister(Reg, EVT::i8);
Reg = getX86SubSuperRegister(Reg, MVT::i8);
break;
case 'h': // Print QImode high register
Reg = getX86SubSuperRegister(Reg, EVT::i8, true);
Reg = getX86SubSuperRegister(Reg, MVT::i8, true);
break;
case 'w': // Print HImode register
Reg = getX86SubSuperRegister(Reg, EVT::i16);
Reg = getX86SubSuperRegister(Reg, MVT::i16);
break;
case 'k': // Print SImode register
Reg = getX86SubSuperRegister(Reg, EVT::i32);
Reg = getX86SubSuperRegister(Reg, MVT::i32);
break;
}

View File

@ -134,8 +134,8 @@ private:
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
bool isScalarFPTypeInSSEReg(EVT VT) const {
return (VT == EVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
(VT == EVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
}
bool isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1 = false);
@ -145,24 +145,24 @@ private:
bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) {
VT = TLI.getValueType(Ty, /*HandleUnknown=*/true);
if (VT == EVT::Other || !VT.isSimple())
if (VT == MVT::Other || !VT.isSimple())
// Unhandled type. Halt "fast" selection and bail.
return false;
// For now, require SSE/SSE2 for performing floating-point operations,
// since x87 requires additional work.
if (VT == EVT::f64 && !X86ScalarSSEf64)
if (VT == MVT::f64 && !X86ScalarSSEf64)
return false;
if (VT == EVT::f32 && !X86ScalarSSEf32)
if (VT == MVT::f32 && !X86ScalarSSEf32)
return false;
// Similarly, no f80 support yet.
if (VT == EVT::f80)
if (VT == MVT::f80)
return false;
// We only handle legal types. For example, on x86-32 the instruction
// selector contains all of the 64-bit instructions from x86-64,
// under the assumption that i64 won't be used if the target doesn't
// support it.
return (AllowI1 && VT == EVT::i1) || TLI.isTypeLegal(VT);
return (AllowI1 && VT == MVT::i1) || TLI.isTypeLegal(VT);
}
#include "X86GenCallingConv.inc"
@ -193,26 +193,26 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: return false;
case EVT::i8:
case MVT::i8:
Opc = X86::MOV8rm;
RC = X86::GR8RegisterClass;
break;
case EVT::i16:
case MVT::i16:
Opc = X86::MOV16rm;
RC = X86::GR16RegisterClass;
break;
case EVT::i32:
case MVT::i32:
Opc = X86::MOV32rm;
RC = X86::GR32RegisterClass;
break;
case EVT::i64:
case MVT::i64:
// Must be in x86-64 mode.
Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass;
break;
case EVT::f32:
case MVT::f32:
if (Subtarget->hasSSE1()) {
Opc = X86::MOVSSrm;
RC = X86::FR32RegisterClass;
@ -221,7 +221,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
RC = X86::RFP32RegisterClass;
}
break;
case EVT::f64:
case MVT::f64:
if (Subtarget->hasSSE2()) {
Opc = X86::MOVSDrm;
RC = X86::FR64RegisterClass;
@ -230,7 +230,7 @@ bool X86FastISel::X86FastEmitLoad(EVT VT, const X86AddressMode &AM,
RC = X86::RFP64RegisterClass;
}
break;
case EVT::f80:
case MVT::f80:
// No f80 support yet.
return false;
}
@ -249,17 +249,17 @@ X86FastISel::X86FastEmitStore(EVT VT, unsigned Val,
const X86AddressMode &AM) {
// Get opcode and regclass of the output for the given store instruction.
unsigned Opc = 0;
switch (VT.getSimpleVT()) {
case EVT::f80: // No f80 support yet.
switch (VT.getSimpleVT().SimpleTy) {
case MVT::f80: // No f80 support yet.
default: return false;
case EVT::i8: Opc = X86::MOV8mr; break;
case EVT::i16: Opc = X86::MOV16mr; break;
case EVT::i32: Opc = X86::MOV32mr; break;
case EVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
case EVT::f32:
case MVT::i8: Opc = X86::MOV8mr; break;
case MVT::i16: Opc = X86::MOV16mr; break;
case MVT::i32: Opc = X86::MOV32mr; break;
case MVT::i64: Opc = X86::MOV64mr; break; // Must be in x86-64 mode.
case MVT::f32:
Opc = Subtarget->hasSSE1() ? X86::MOVSSmr : X86::ST_Fp32m;
break;
case EVT::f64:
case MVT::f64:
Opc = Subtarget->hasSSE2() ? X86::MOVSDmr : X86::ST_Fp64m;
break;
}
@ -277,12 +277,12 @@ bool X86FastISel::X86FastEmitStore(EVT VT, Value *Val,
// If this is a store of a simple constant, fold the constant into the store.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
unsigned Opc = 0;
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: break;
case EVT::i8: Opc = X86::MOV8mi; break;
case EVT::i16: Opc = X86::MOV16mi; break;
case EVT::i32: Opc = X86::MOV32mi; break;
case EVT::i64:
case MVT::i8: Opc = X86::MOV8mi; break;
case MVT::i16: Opc = X86::MOV16mi; break;
case MVT::i32: Opc = X86::MOV32mi; break;
case MVT::i64:
// Must be a 32-bit sign extended value.
if ((int)CI->getSExtValue() == CI->getSExtValue())
Opc = X86::MOV64mi32;
@ -478,7 +478,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM) {
StubAM.GV = GV;
StubAM.GVOpFlags = GVFlags;
if (TLI.getPointerTy() == EVT::i64) {
if (TLI.getPointerTy() == MVT::i64) {
Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass;
@ -636,14 +636,14 @@ bool X86FastISel::X86SelectLoad(Instruction *I) {
}
static unsigned X86ChooseCmpOpcode(EVT VT) {
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: return 0;
case EVT::i8: return X86::CMP8rr;
case EVT::i16: return X86::CMP16rr;
case EVT::i32: return X86::CMP32rr;
case EVT::i64: return X86::CMP64rr;
case EVT::f32: return X86::UCOMISSrr;
case EVT::f64: return X86::UCOMISDrr;
case MVT::i8: return X86::CMP8rr;
case MVT::i16: return X86::CMP16rr;
case MVT::i32: return X86::CMP32rr;
case MVT::i64: return X86::CMP64rr;
case MVT::f32: return X86::UCOMISSrr;
case MVT::f64: return X86::UCOMISDrr;
}
}
@ -651,13 +651,13 @@ static unsigned X86ChooseCmpOpcode(EVT VT) {
/// of the comparison, return an opcode that works for the compare (e.g.
/// CMP32ri) otherwise return 0.
static unsigned X86ChooseCmpImmediateOpcode(EVT VT, ConstantInt *RHSC) {
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
// Otherwise, we can't fold the immediate into this comparison.
default: return 0;
case EVT::i8: return X86::CMP8ri;
case EVT::i16: return X86::CMP16ri;
case EVT::i32: return X86::CMP32ri;
case EVT::i64:
case MVT::i8: return X86::CMP8ri;
case MVT::i16: return X86::CMP16ri;
case MVT::i32: return X86::CMP32ri;
case MVT::i64:
// 64-bit comparisons are only valid if the immediate fits in a 32-bit sext
// field.
if ((int)RHSC->getSExtValue() == RHSC->getSExtValue())
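The fold test above is a narrowing round trip: a 64-bit immediate may use the 32-bit-immediate compare form only if truncating it to 32 bits and sign-extending back reproduces the value. A standalone sketch of that check; fitsSExt32 is an illustrative name, not an LLVM helper:

#include <cstdint>

// True if Imm survives a truncate-to-i32 / sign-extend-to-i64 round trip,
// i.e. it is representable as a sign-extended 32-bit immediate.
static bool fitsSExt32(int64_t Imm) {
  return static_cast<int64_t>(static_cast<int32_t>(Imm)) == Imm;
}
// fitsSExt32(-1)                  -> true
// fitsSExt32(0x7FFFFFFF)          -> true
// fitsSExt32(INT64_C(0x80000000)) -> false (sign-extends to a negative value)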
@ -778,7 +778,7 @@ bool X86FastISel::X86SelectZExt(Instruction *I) {
unsigned ResultReg = getRegForValue(I->getOperand(0));
if (ResultReg == 0) return false;
// Set the high bits to zero.
ResultReg = FastEmitZExtFromI1(EVT::i8, ResultReg);
ResultReg = FastEmitZExtFromI1(MVT::i8, ResultReg);
if (ResultReg == 0) return false;
UpdateValueMap(I, ResultReg);
return true;
@ -976,7 +976,7 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
}
EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
if (VT == EVT::Other || !isTypeLegal(I->getType(), VT))
if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
return false;
unsigned Op0Reg = getRegForValue(I->getOperand(0));
@ -1010,18 +1010,18 @@ bool X86FastISel::X86SelectShift(Instruction *I) {
bool X86FastISel::X86SelectSelect(Instruction *I) {
EVT VT = TLI.getValueType(I->getType(), /*HandleUnknown=*/true);
if (VT == EVT::Other || !isTypeLegal(I->getType(), VT))
if (VT == MVT::Other || !isTypeLegal(I->getType(), VT))
return false;
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
if (VT.getSimpleVT() == EVT::i16) {
if (VT.getSimpleVT() == MVT::i16) {
Opc = X86::CMOVE16rr;
RC = &X86::GR16RegClass;
} else if (VT.getSimpleVT() == EVT::i32) {
} else if (VT.getSimpleVT() == MVT::i32) {
Opc = X86::CMOVE32rr;
RC = &X86::GR32RegClass;
} else if (VT.getSimpleVT() == EVT::i64) {
} else if (VT.getSimpleVT() == MVT::i64) {
Opc = X86::CMOVE64rr;
RC = &X86::GR64RegClass;
} else {
@ -1085,10 +1085,10 @@ bool X86FastISel::X86SelectTrunc(Instruction *I) {
EVT DstVT = TLI.getValueType(I->getType());
// This code only handles truncation to byte right now.
if (DstVT != EVT::i8 && DstVT != EVT::i1)
if (DstVT != MVT::i8 && DstVT != MVT::i1)
// All other cases should be handled by the tblgen generated code.
return false;
if (SrcVT != EVT::i16 && SrcVT != EVT::i32)
if (SrcVT != MVT::i16 && SrcVT != MVT::i32)
// All other cases should be handled by the tblgen generated code.
return false;
@ -1098,14 +1098,14 @@ bool X86FastISel::X86SelectTrunc(Instruction *I) {
return false;
// First issue a copy to GR16_ABCD or GR32_ABCD.
unsigned CopyOpc = (SrcVT == EVT::i16) ? X86::MOV16rr : X86::MOV32rr;
const TargetRegisterClass *CopyRC = (SrcVT == EVT::i16)
unsigned CopyOpc = (SrcVT == MVT::i16) ? X86::MOV16rr : X86::MOV32rr;
const TargetRegisterClass *CopyRC = (SrcVT == MVT::i16)
? X86::GR16_ABCDRegisterClass : X86::GR32_ABCDRegisterClass;
unsigned CopyReg = createResultReg(CopyRC);
BuildMI(MBB, DL, TII.get(CopyOpc), CopyReg).addReg(InputReg);
// Then issue an extract_subreg.
unsigned ResultReg = FastEmitInst_extractsubreg(EVT::i8,
unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
CopyReg, X86::SUBREG_8BIT);
if (!ResultReg)
return false;
@ -1164,9 +1164,9 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
return false;
unsigned OpC = 0;
if (VT == EVT::i32)
if (VT == MVT::i32)
OpC = X86::ADD32rr;
else if (VT == EVT::i64)
else if (VT == MVT::i64)
OpC = X86::ADD64rr;
else
return false;
@ -1185,7 +1185,7 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
if (DestReg1 != ResultReg)
ResultReg = DestReg1+1;
else
ResultReg = createResultReg(TLI.getRegClassFor(EVT::i8));
ResultReg = createResultReg(TLI.getRegClassFor(MVT::i8));
unsigned Opc = X86::SETBr;
if (I.getIntrinsicID() == Intrinsic::sadd_with_overflow)
@ -1231,7 +1231,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
const Type *RetTy = CS.getType();
EVT RetVT;
if (RetTy == Type::VoidTy)
RetVT = EVT::isVoid;
RetVT = MVT::isVoid;
else if (!isTypeLegal(RetTy, RetVT, true))
return false;
@ -1251,8 +1251,8 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
// Allow calls which produce i1 results.
bool AndToI1 = false;
if (RetVT == EVT::i1) {
RetVT = EVT::i8;
if (RetVT == MVT::i1) {
RetVT = MVT::i8;
AndToI1 = true;
}
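As a side note on the AndToI1 flag set above: the call is emitted as if it returned i8, and the caller later recovers the i1 by masking off everything but the low bit. In scalar terms (illustrative sketch only):

#include <cstdint>
#include <cassert>

// Bits 1..7 of the 8-bit return register are undefined for an i1 result;
// AND-ing with 1 yields the boolean value.
static bool recoverI1(uint8_t RetReg) {
  return (RetReg & 1) != 0;
}

int main() {
  assert(recoverI1(0x01));
  assert(recoverI1(0xFF));   // junk in the high bits is ignored
  assert(!recoverI1(0xFE));  // low bit clear -> false
  return 0;
}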
@ -1445,7 +1445,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
BuildMI(MBB, DL, TII.get(AdjStackUp)).addImm(NumBytes).addImm(0);
// Now handle call return value (if any).
if (RetVT.getSimpleVT() != EVT::isVoid) {
if (RetVT.getSimpleVT().SimpleTy != MVT::isVoid) {
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CC, false, TM, RVLocs, I->getParent()->getContext());
CCInfo.AnalyzeCallResult(RetVT, RetCC_X86);
@ -1462,7 +1462,7 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
if ((RVLocs[0].getLocReg() == X86::ST0 ||
RVLocs[0].getLocReg() == X86::ST1) &&
isScalarFPTypeInSSEReg(RVLocs[0].getValVT())) {
CopyVT = EVT::f80;
CopyVT = MVT::f80;
SrcRC = X86::RSTRegisterClass;
DstRC = X86::RFP80RegisterClass;
}
@ -1477,13 +1477,13 @@ bool X86FastISel::X86SelectCall(Instruction *I) {
// register. This is accomplished by storing the F80 value in memory and
// then loading it back. Ewww...
EVT ResVT = RVLocs[0].getValVT();
unsigned Opc = ResVT == EVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned Opc = ResVT == MVT::f32 ? X86::ST_Fp80m32 : X86::ST_Fp80m64;
unsigned MemSize = ResVT.getSizeInBits()/8;
int FI = MFI.CreateStackObject(MemSize, MemSize);
addFrameReference(BuildMI(MBB, DL, TII.get(Opc)), FI).addReg(ResultReg);
DstRC = ResVT == EVT::f32
DstRC = ResVT == MVT::f32
? X86::FR32RegisterClass : X86::FR64RegisterClass;
Opc = ResVT == EVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
Opc = ResVT == MVT::f32 ? X86::MOVSSrm : X86::MOVSDrm;
ResultReg = createResultReg(DstRC);
addFrameReference(BuildMI(MBB, DL, TII.get(Opc), ResultReg), FI);
}
@ -1560,26 +1560,26 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
// Get opcode and regclass of the output for the given load instruction.
unsigned Opc = 0;
const TargetRegisterClass *RC = NULL;
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: return false;
case EVT::i8:
case MVT::i8:
Opc = X86::MOV8rm;
RC = X86::GR8RegisterClass;
break;
case EVT::i16:
case MVT::i16:
Opc = X86::MOV16rm;
RC = X86::GR16RegisterClass;
break;
case EVT::i32:
case MVT::i32:
Opc = X86::MOV32rm;
RC = X86::GR32RegisterClass;
break;
case EVT::i64:
case MVT::i64:
// Must be in x86-64 mode.
Opc = X86::MOV64rm;
RC = X86::GR64RegisterClass;
break;
case EVT::f32:
case MVT::f32:
if (Subtarget->hasSSE1()) {
Opc = X86::MOVSSrm;
RC = X86::FR32RegisterClass;
@ -1588,7 +1588,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
RC = X86::RFP32RegisterClass;
}
break;
case EVT::f64:
case MVT::f64:
if (Subtarget->hasSSE2()) {
Opc = X86::MOVSDrm;
RC = X86::FR64RegisterClass;
@ -1597,7 +1597,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
RC = X86::RFP64RegisterClass;
}
break;
case EVT::f80:
case MVT::f80:
// No f80 support yet.
return false;
}
@ -1606,7 +1606,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
if (isa<GlobalValue>(C)) {
X86AddressMode AM;
if (X86SelectAddress(C, AM)) {
if (TLI.getPointerTy() == EVT::i32)
if (TLI.getPointerTy() == MVT::i32)
Opc = X86::LEA32r;
else
Opc = X86::LEA64r;
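The hunks above and throughout the rest of this patch follow one mechanical pattern: simple value types are now spelled MVT::*, code that used to switch directly on getSimpleVT() switches on its SimpleTy member, and equality tests such as VT == MVT::i32 keep their old shape. A much-simplified standalone mock of the two classes (illustrative only; the real definitions live in llvm/CodeGen/ValueTypes.h and carry far more machinery) shows why both spellings appear:

#include <cassert>

// Toy stand-ins for the post-split types; illustrative only.
struct MVT {
  enum SimpleValueType { Other, i8, i16, i32, i64 };
  SimpleValueType SimpleTy;
  MVT(SimpleValueType S = Other) : SimpleTy(S) {}
  bool operator==(const MVT &RHS) const { return SimpleTy == RHS.SimpleTy; }
};

struct EVT {
  MVT V;                                  // extended types omitted here
  EVT(MVT M = MVT()) : V(M) {}
  MVT getSimpleVT() const { return V; }
  bool operator==(const MVT &RHS) const { return V == RHS; }
};

// MVT is a struct wrapping the enum, so switch statements go through SimpleTy.
static unsigned widthOf(EVT VT) {
  switch (VT.getSimpleVT().SimpleTy) {
  default:       return 0;
  case MVT::i8:  return 8;
  case MVT::i16: return 16;
  case MVT::i32: return 32;
  case MVT::i64: return 64;
  }
}

int main() {
  EVT VT = MVT(MVT::i32);
  assert(widthOf(VT) == 32);
  assert(VT == MVT::i32);                 // comparisons read exactly as before
  return 0;
}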
@ -233,40 +233,40 @@ namespace {
// These are 32-bit even in 64-bit mode since RIP relative offset
// is 32-bit.
if (AM.GV)
Disp = CurDAG->getTargetGlobalAddress(AM.GV, EVT::i32, AM.Disp,
Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp,
AM.SymbolFlags);
else if (AM.CP)
Disp = CurDAG->getTargetConstantPool(AM.CP, EVT::i32,
Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
AM.Align, AM.Disp, AM.SymbolFlags);
else if (AM.ES)
Disp = CurDAG->getTargetExternalSymbol(AM.ES, EVT::i32, AM.SymbolFlags);
Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
else if (AM.JT != -1)
Disp = CurDAG->getTargetJumpTable(AM.JT, EVT::i32, AM.SymbolFlags);
Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
else
Disp = CurDAG->getTargetConstant(AM.Disp, EVT::i32);
Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);
if (AM.Segment.getNode())
Segment = AM.Segment;
else
Segment = CurDAG->getRegister(0, EVT::i32);
Segment = CurDAG->getRegister(0, MVT::i32);
}
/// getI8Imm - Return a target constant with the specified value, of type
/// i8.
inline SDValue getI8Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i8);
return CurDAG->getTargetConstant(Imm, MVT::i8);
}
/// getI16Imm - Return a target constant with the specified value, of type
/// i16.
inline SDValue getI16Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i16);
return CurDAG->getTargetConstant(Imm, MVT::i16);
}
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
inline SDValue getI32Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i32);
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
/// getGlobalBaseReg - Return an SDNode that returns the value of
@ -408,7 +408,7 @@ static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
Ops.push_back(Chain.getOperand(i));
SDValue NewChain =
CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
EVT::Other, &Ops[0], Ops.size());
MVT::Other, &Ops[0], Ops.size());
Ops.clear();
Ops.push_back(NewChain);
}
@ -764,7 +764,7 @@ bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
}
if (N.getOpcode() == X86ISD::WrapperRIP)
AM.setBaseReg(CurDAG->getRegister(X86::RIP, EVT::i64));
AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
return false;
}
@ -1001,7 +1001,7 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
(RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
RHS.getNode()->getOperand(0).getValueType() == EVT::i32))
RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
++Cost;
// If the base is a register with multiple uses, this
// transformation may save a mov.
@ -1111,13 +1111,13 @@ bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
unsigned ScaleLog = 8 - C1->getZExtValue();
if (ScaleLog > 0 && ScaleLog < 4 &&
C2->getZExtValue() == (UINT64_C(0xff) << ScaleLog)) {
SDValue Eight = CurDAG->getConstant(8, EVT::i8);
SDValue Eight = CurDAG->getConstant(8, MVT::i8);
SDValue Mask = CurDAG->getConstant(0xff, N.getValueType());
SDValue Srl = CurDAG->getNode(ISD::SRL, dl, N.getValueType(),
X, Eight);
SDValue And = CurDAG->getNode(ISD::AND, dl, N.getValueType(),
Srl, Mask);
SDValue ShlCount = CurDAG->getConstant(ScaleLog, EVT::i8);
SDValue ShlCount = CurDAG->getConstant(ScaleLog, MVT::i8);
SDValue Shl = CurDAG->getNode(ISD::SHL, dl, N.getValueType(),
And, ShlCount);
@ -1333,7 +1333,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
// Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
// segments.
SDValue Copy = AM.Segment;
SDValue T = CurDAG->getRegister(0, EVT::i32);
SDValue T = CurDAG->getRegister(0, MVT::i32);
AM.Segment = T;
if (MatchAddress(N, AM))
return false;
@ -1400,11 +1400,11 @@ bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue Op, SDValue N, SDValue &Base,
AM.Base.Reg = CurDAG->getRegister(0, N.getValueType());
AM.SymbolFlags = GA->getTargetFlags();
if (N.getValueType() == EVT::i32) {
if (N.getValueType() == MVT::i32) {
AM.Scale = 1;
AM.IndexReg = CurDAG->getRegister(X86::EBX, EVT::i32);
AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
} else {
AM.IndexReg = CurDAG->getRegister(0, EVT::i64);
AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
}
SDValue Segment;
@ -1435,7 +1435,7 @@ SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
static SDNode *FindCallStartFromCall(SDNode *Node) {
if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
assert(Node->getOperand(0).getValueType() == EVT::Other &&
assert(Node->getOperand(0).getValueType() == MVT::Other &&
"Node doesn't have a token chain argument!");
return FindCallStartFromCall(Node->getOperand(0).getNode());
}
@ -1451,7 +1451,7 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
SDValue LSI = Node->getOperand(4); // MemOperand
const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, LSI, Chain};
return CurDAG->getTargetNode(Opc, Node->getDebugLoc(),
EVT::i32, EVT::i32, EVT::Other, Ops,
MVT::i32, MVT::i32, MVT::Other, Ops,
array_lengthof(Ops));
}
@ -1495,9 +1495,9 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
}
unsigned Opc = 0;
switch (NVT.getSimpleVT()) {
switch (NVT.getSimpleVT().SimpleTy) {
default: return 0;
case EVT::i8:
case MVT::i8:
if (isInc)
Opc = X86::LOCK_INC8m;
else if (isDec)
@ -1514,7 +1514,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
Opc = X86::LOCK_ADD8mr;
}
break;
case EVT::i16:
case MVT::i16:
if (isInc)
Opc = X86::LOCK_INC16m;
else if (isDec)
@ -1537,7 +1537,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
Opc = X86::LOCK_ADD16mr;
}
break;
case EVT::i32:
case MVT::i32:
if (isInc)
Opc = X86::LOCK_INC32m;
else if (isDec)
@ -1560,7 +1560,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
Opc = X86::LOCK_ADD32mr;
}
break;
case EVT::i64:
case MVT::i64:
if (isInc)
Opc = X86::LOCK_INC64m;
else if (isDec)
@ -1591,12 +1591,12 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) {
SDValue MemOp = CurDAG->getMemOperand(cast<MemSDNode>(Node)->getMemOperand());
if (isInc || isDec) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, MemOp, Chain };
SDValue Ret = SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 7), 0);
SDValue Ret = SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 7), 0);
SDValue RetVals[] = { Undef, Ret };
return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
} else {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, MemOp, Chain };
SDValue Ret = SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Other, Ops, 8), 0);
SDValue Ret = SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Other, Ops, 8), 0);
SDValue RetVals[] = { Undef, Ret };
return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
}
@ -1664,30 +1664,30 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
bool isSigned = Opcode == ISD::SMUL_LOHI;
if (!isSigned) {
switch (NVT.getSimpleVT()) {
switch (NVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case EVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
case EVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
case EVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
case EVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
}
} else {
switch (NVT.getSimpleVT()) {
switch (NVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case EVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
case EVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
case EVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
case EVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
}
}
unsigned LoReg, HiReg;
switch (NVT.getSimpleVT()) {
switch (NVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case EVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
case EVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
case EVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
case EVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
case MVT::i8: LoReg = X86::AL; HiReg = X86::AH; break;
case MVT::i16: LoReg = X86::AX; HiReg = X86::DX; break;
case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
}
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
@ -1706,14 +1706,14 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
InFlag };
SDNode *CNode =
CurDAG->getTargetNode(MOpc, dl, EVT::Other, EVT::Flag, Ops,
CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
array_lengthof(Ops));
InFlag = SDValue(CNode, 1);
// Update the chain.
ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
} else {
InFlag =
SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Flag, N1, InFlag), 0);
SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
}
// Copy the low half of the result, if it is needed.
@ -1737,15 +1737,15 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
// Prevent use of AH in a REX instruction by referencing AX instead.
// Shift it down 8 bits.
Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
X86::AX, EVT::i16, InFlag);
X86::AX, MVT::i16, InFlag);
InFlag = Result.getValue(2);
Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, EVT::i16,
Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
Result,
CurDAG->getTargetConstant(8, EVT::i8)), 0);
CurDAG->getTargetConstant(8, MVT::i8)), 0);
// Then truncate it down to i8.
SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, EVT::i32);
SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
EVT::i8, Result, SRIdx), 0);
MVT::i8, Result, SRIdx), 0);
} else {
Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
HiReg, NVT, InFlag);
@ -1775,43 +1775,43 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
bool isSigned = Opcode == ISD::SDIVREM;
if (!isSigned) {
switch (NVT.getSimpleVT()) {
switch (NVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case EVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
case EVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
case EVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
case EVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
}
} else {
switch (NVT.getSimpleVT()) {
switch (NVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case EVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
case EVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
case EVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
case EVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
}
}
unsigned LoReg, HiReg;
unsigned ClrOpcode, SExtOpcode;
switch (NVT.getSimpleVT()) {
switch (NVT.getSimpleVT().SimpleTy) {
default: llvm_unreachable("Unsupported VT!");
case EVT::i8:
case MVT::i8:
LoReg = X86::AL; HiReg = X86::AH;
ClrOpcode = 0;
SExtOpcode = X86::CBW;
break;
case EVT::i16:
case MVT::i16:
LoReg = X86::AX; HiReg = X86::DX;
ClrOpcode = X86::MOV16r0;
SExtOpcode = X86::CWD;
break;
case EVT::i32:
case MVT::i32:
LoReg = X86::EAX; HiReg = X86::EDX;
ClrOpcode = X86::MOV32r0;
SExtOpcode = X86::CDQ;
break;
case EVT::i64:
case MVT::i64:
LoReg = X86::RAX; HiReg = X86::RDX;
ClrOpcode = ~0U; // NOT USED.
SExtOpcode = X86::CQO;
@ -1823,21 +1823,21 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
bool signBitIsZero = CurDAG->SignBitIsZero(N0);
SDValue InFlag;
if (NVT == EVT::i8 && (!isSigned || signBitIsZero)) {
if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
// Special case for div8, just use a move with zero extension to AX to
// clear the upper 8 bits (AH).
SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
Move =
SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, dl, EVT::i16,
EVT::Other, Ops,
SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, dl, MVT::i16,
MVT::Other, Ops,
array_lengthof(Ops)), 0);
Chain = Move.getValue(1);
ReplaceUses(N0.getValue(1), Chain);
} else {
Move =
SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, dl, EVT::i16, N0),0);
SDValue(CurDAG->getTargetNode(X86::MOVZX16rr8, dl, MVT::i16, N0),0);
Chain = CurDAG->getEntryNode();
}
Chain = CurDAG->getCopyToReg(Chain, dl, X86::AX, Move, SDValue());
@ -1849,24 +1849,24 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
if (isSigned && !signBitIsZero) {
// Sign extend the low part into the high part.
InFlag =
SDValue(CurDAG->getTargetNode(SExtOpcode, dl, EVT::Flag, InFlag),0);
SDValue(CurDAG->getTargetNode(SExtOpcode, dl, MVT::Flag, InFlag),0);
} else {
// Zero out the high part, effectively zero extending the input.
SDValue ClrNode;
if (NVT.getSimpleVT() == EVT::i64) {
ClrNode = SDValue(CurDAG->getTargetNode(X86::MOV32r0, dl, EVT::i32),
if (NVT.getSimpleVT() == MVT::i64) {
ClrNode = SDValue(CurDAG->getTargetNode(X86::MOV32r0, dl, MVT::i32),
0);
// We just did a 32-bit clear, insert it into a 64-bit register to
// clear the whole 64-bit reg.
SDValue Undef =
SDValue(CurDAG->getTargetNode(TargetInstrInfo::IMPLICIT_DEF,
dl, EVT::i64), 0);
dl, MVT::i64), 0);
SDValue SubRegNo =
CurDAG->getTargetConstant(X86::SUBREG_32BIT, EVT::i32);
CurDAG->getTargetConstant(X86::SUBREG_32BIT, MVT::i32);
ClrNode =
SDValue(CurDAG->getTargetNode(TargetInstrInfo::INSERT_SUBREG, dl,
EVT::i64, Undef, ClrNode, SubRegNo),
MVT::i64, Undef, ClrNode, SubRegNo),
0);
} else {
ClrNode = SDValue(CurDAG->getTargetNode(ClrOpcode, dl, NVT), 0);
@ -1881,14 +1881,14 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
InFlag };
SDNode *CNode =
CurDAG->getTargetNode(MOpc, dl, EVT::Other, EVT::Flag, Ops,
CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
array_lengthof(Ops));
InFlag = SDValue(CNode, 1);
// Update the chain.
ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
} else {
InFlag =
SDValue(CurDAG->getTargetNode(Opc, dl, EVT::Flag, N1, InFlag), 0);
SDValue(CurDAG->getTargetNode(Opc, dl, MVT::Flag, N1, InFlag), 0);
}
// Copy the division (low) result, if it is needed.
@ -1912,16 +1912,16 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
// Prevent use of AH in a REX instruction by referencing AX instead.
// Shift it down 8 bits.
Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
X86::AX, EVT::i16, InFlag);
X86::AX, MVT::i16, InFlag);
InFlag = Result.getValue(2);
Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, EVT::i16,
Result = SDValue(CurDAG->getTargetNode(X86::SHR16ri, dl, MVT::i16,
Result,
CurDAG->getTargetConstant(8, EVT::i8)),
CurDAG->getTargetConstant(8, MVT::i8)),
0);
// Then truncate it down to i8.
SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, EVT::i32);
SDValue SRIdx = CurDAG->getTargetConstant(X86::SUBREG_8BIT, MVT::i32);
Result = SDValue(CurDAG->getTargetNode(X86::EXTRACT_SUBREG, dl,
EVT::i8, Result, SRIdx), 0);
MVT::i8, Result, SRIdx), 0);
} else {
Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
HiReg, NVT, InFlag);
@ -1981,7 +1981,7 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
TLI.getPointerTy());
SDValue Ops[] = { Tmp1, Tmp2, Chain };
return CurDAG->getTargetNode(TargetInstrInfo::DECLARE, dl,
EVT::Other, Ops,
MVT::Other, Ops,
array_lengthof(Ops));
}
}
File diff suppressed because it is too large
@ -411,7 +411,7 @@ namespace llvm {
virtual const char *getTargetNodeName(unsigned Opcode) const;
/// getSetCCResultType - Return the ISD::SETCC ValueType
virtual EVT::SimpleValueType getSetCCResultType(EVT VT) const;
virtual MVT::SimpleValueType getSetCCResultType(EVT VT) const;
/// computeMaskedBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
@ -503,7 +503,7 @@ namespace llvm {
// Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
// expensive than a straight movsd. On the other hand, it's important to
// shrink long double fp constant since fldt is very slow.
return !X86ScalarSSEf64 || VT == EVT::f80;
return !X86ScalarSSEf64 || VT == MVT::f80;
}
/// IsEligibleForTailCallOptimization - Check whether the call is eligible
@ -523,8 +523,8 @@ namespace llvm {
/// isScalarFPTypeInSSEReg - Return true if the specified scalar FP type is
/// computed in an SSE register, not on the X87 floating point stack.
bool isScalarFPTypeInSSEReg(EVT VT) const {
return (VT == EVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
(VT == EVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
return (VT == MVT::f64 && X86ScalarSSEf64) || // f64 is when SSE2
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
}
/// getWidenVectorType: given a vector type, returns the type to widen
@ -2525,7 +2525,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
bool isAligned = (RI.getStackAlignment() >= 16) ||
RI.needsStackRealignment(MF);
Load = DAG.getTargetNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
VT, EVT::Other, &AddrOps[0], AddrOps.size());
VT, MVT::Other, &AddrOps[0], AddrOps.size());
NewNodes.push_back(Load);
}
@ -2538,7 +2538,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
EVT VT = N->getValueType(i);
if (VT != EVT::Other && i >= (unsigned)TID.getNumDefs())
if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
VTs.push_back(VT);
}
if (Load)
@ -2557,7 +2557,7 @@ X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
RI.needsStackRealignment(MF);
SDNode *Store = DAG.getTargetNode(getStoreRegOpcode(0, DstRC,
isAligned, TM),
dl, EVT::Other,
dl, MVT::Other,
&AddrOps[0], AddrOps.size());
NewNodes.push_back(Store);
}
@ -1263,9 +1263,9 @@ unsigned X86RegisterInfo::getEHHandlerRegister() const {
namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
switch (VT.getSimpleVT()) {
switch (VT.getSimpleVT().SimpleTy) {
default: return Reg;
case EVT::i8:
case MVT::i8:
if (High) {
switch (Reg) {
default: return 0;
@ -1315,7 +1315,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
return X86::R15B;
}
}
case EVT::i16:
case MVT::i16:
switch (Reg) {
default: return Reg;
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
@ -1351,7 +1351,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
return X86::R15W;
}
case EVT::i32:
case MVT::i32:
switch (Reg) {
default: return Reg;
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
@ -1387,7 +1387,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
return X86::R15D;
}
case EVT::i64:
case MVT::i64:
switch (Reg) {
default: return Reg;
case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
@ -54,7 +54,7 @@ namespace {
/// getI32Imm - Return a target constant with the specified value, of type
/// i32.
inline SDValue getI32Imm(unsigned Imm) {
return CurDAG->getTargetConstant(Imm, EVT::i32);
return CurDAG->getTargetConstant(Imm, MVT::i32);
}
// Complex Pattern Selectors.
@ -87,8 +87,8 @@ bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Op, SDValue Addr,
SDValue &Base, SDValue &Offset) {
FrameIndexSDNode *FIN = 0;
if ((FIN = dyn_cast<FrameIndexSDNode>(Addr))) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
Offset = CurDAG->getTargetConstant(0, EVT::i32);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
if (Addr.getOpcode() == ISD::ADD) {
@ -97,8 +97,8 @@ bool XCoreDAGToDAGISel::SelectADDRspii(SDValue Op, SDValue Addr,
&& (CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
&& (CN->getSExtValue() % 4 == 0 && CN->getSExtValue() >= 0)) {
// Constant positive word offset from frame index
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), EVT::i32);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), EVT::i32);
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
return true;
}
}
@ -109,7 +109,7 @@ bool XCoreDAGToDAGISel::SelectADDRdpii(SDValue Op, SDValue Addr,
SDValue &Base, SDValue &Offset) {
if (Addr.getOpcode() == XCoreISD::DPRelativeWrapper) {
Base = Addr.getOperand(0);
Offset = CurDAG->getTargetConstant(0, EVT::i32);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
if (Addr.getOpcode() == ISD::ADD) {
@ -119,7 +119,7 @@ bool XCoreDAGToDAGISel::SelectADDRdpii(SDValue Op, SDValue Addr,
&& (CN->getSExtValue() % 4 == 0)) {
// Constant word offset from an object in the data region
Base = Addr.getOperand(0).getOperand(0);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), EVT::i32);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
return true;
}
}
@ -130,7 +130,7 @@ bool XCoreDAGToDAGISel::SelectADDRcpii(SDValue Op, SDValue Addr,
SDValue &Base, SDValue &Offset) {
if (Addr.getOpcode() == XCoreISD::CPRelativeWrapper) {
Base = Addr.getOperand(0);
Offset = CurDAG->getTargetConstant(0, EVT::i32);
Offset = CurDAG->getTargetConstant(0, MVT::i32);
return true;
}
if (Addr.getOpcode() == ISD::ADD) {
@ -140,7 +140,7 @@ bool XCoreDAGToDAGISel::SelectADDRcpii(SDValue Op, SDValue Addr,
&& (CN->getSExtValue() % 4 == 0)) {
// Constant word offset from an object in the data region
Base = Addr.getOperand(0).getOperand(0);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), EVT::i32);
Offset = CurDAG->getTargetConstant(CN->getSExtValue(), MVT::i32);
return true;
}
}
@ -163,21 +163,21 @@ SDNode *XCoreDAGToDAGISel::Select(SDValue Op) {
SDNode *N = Op.getNode();
DebugLoc dl = N->getDebugLoc();
EVT NVT = N->getValueType(0);
if (NVT == EVT::i32) {
if (NVT == MVT::i32) {
switch (N->getOpcode()) {
default: break;
case ISD::Constant: {
if (Predicate_immMskBitp(N)) {
SDValue MskSize = Transform_msksize_xform(N);
return CurDAG->getTargetNode(XCore::MKMSK_rus, dl, EVT::i32, MskSize);
return CurDAG->getTargetNode(XCore::MKMSK_rus, dl, MVT::i32, MskSize);
}
else if (! Predicate_immU16(N)) {
unsigned Val = cast<ConstantSDNode>(N)->getZExtValue();
SDValue CPIdx =
CurDAG->getTargetConstantPool(ConstantInt::get(Type::Int32Ty, Val),
TLI.getPointerTy());
return CurDAG->getTargetNode(XCore::LDWCP_lru6, dl, EVT::i32,
EVT::Other, CPIdx,
return CurDAG->getTargetNode(XCore::LDWCP_lru6, dl, MVT::i32,
MVT::Other, CPIdx,
CurDAG->getEntryNode());
}
break;
@ -185,11 +185,11 @@ SDNode *XCoreDAGToDAGISel::Select(SDValue Op) {
case ISD::SMUL_LOHI: {
// FIXME fold addition into the macc instruction
if (!Subtarget.isXS1A()) {
SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, dl, EVT::i32,
CurDAG->getTargetConstant(0, EVT::i32)), 0);
SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, dl, MVT::i32,
CurDAG->getTargetConstant(0, MVT::i32)), 0);
SDValue Ops[] = { Zero, Zero, Op.getOperand(0), Op.getOperand(1) };
SDNode *ResNode = CurDAG->getTargetNode(XCore::MACCS_l4r, dl,
EVT::i32, EVT::i32, Ops, 4);
MVT::i32, MVT::i32, Ops, 4);
ReplaceUses(SDValue(N, 0), SDValue(ResNode, 1));
ReplaceUses(SDValue(N, 1), SDValue(ResNode, 0));
return NULL;
@ -198,12 +198,12 @@ SDNode *XCoreDAGToDAGISel::Select(SDValue Op) {
}
case ISD::UMUL_LOHI: {
// FIXME fold addition into the macc / lmul instruction
SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, dl, EVT::i32,
CurDAG->getTargetConstant(0, EVT::i32)), 0);
SDValue Zero(CurDAG->getTargetNode(XCore::LDC_ru6, dl, MVT::i32,
CurDAG->getTargetConstant(0, MVT::i32)), 0);
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
Zero, Zero };
SDNode *ResNode = CurDAG->getTargetNode(XCore::LMUL_l6r, dl, EVT::i32,
EVT::i32, Ops, 4);
SDNode *ResNode = CurDAG->getTargetNode(XCore::LMUL_l6r, dl, MVT::i32,
MVT::i32, Ops, 4);
ReplaceUses(SDValue(N, 0), SDValue(ResNode, 1));
ReplaceUses(SDValue(N, 1), SDValue(ResNode, 0));
return NULL;
@ -212,7 +212,7 @@ SDNode *XCoreDAGToDAGISel::Select(SDValue Op) {
if (!Subtarget.isXS1A()) {
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
Op.getOperand(2) };
return CurDAG->getTargetNode(XCore::LADD_l5r, dl, EVT::i32, EVT::i32,
return CurDAG->getTargetNode(XCore::LADD_l5r, dl, MVT::i32, MVT::i32,
Ops, 3);
}
break;
@ -221,7 +221,7 @@ SDNode *XCoreDAGToDAGISel::Select(SDValue Op) {
if (!Subtarget.isXS1A()) {
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1),
Op.getOperand(2) };
return CurDAG->getTargetNode(XCore::LSUB_l5r, dl, EVT::i32, EVT::i32,
return CurDAG->getTargetNode(XCore::LSUB_l5r, dl, MVT::i32, MVT::i32,
Ops, 3);
}
break;
@ -61,7 +61,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
Subtarget(*XTM.getSubtargetImpl()) {
// Set up the register classes.
addRegisterClass(EVT::i32, XCore::GRRegsRegisterClass);
addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass);
// Compute derived properties from the register classes
computeRegisterProperties();
@ -69,7 +69,7 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
// Division is expensive
setIntDivIsCheap(false);
setShiftAmountType(EVT::i32);
setShiftAmountType(MVT::i32);
setStackPointerRegisterToSaveRestore(XCore::SP);
setSchedulingPreference(SchedulingForRegPressure);
@ -78,75 +78,75 @@ XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
setBooleanContents(ZeroOrOneBooleanContent);
// XCore does not have the NodeTypes below.
setOperationAction(ISD::BR_CC, EVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, EVT::i32, Custom);
setOperationAction(ISD::ADDC, EVT::i32, Expand);
setOperationAction(ISD::ADDE, EVT::i32, Expand);
setOperationAction(ISD::SUBC, EVT::i32, Expand);
setOperationAction(ISD::SUBE, EVT::i32, Expand);
setOperationAction(ISD::BR_CC, MVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
setOperationAction(ISD::ADDC, MVT::i32, Expand);
setOperationAction(ISD::ADDE, MVT::i32, Expand);
setOperationAction(ISD::SUBC, MVT::i32, Expand);
setOperationAction(ISD::SUBE, MVT::i32, Expand);
// Stop the combiner recombining select and set_cc
setOperationAction(ISD::SELECT_CC, EVT::Other, Expand);
setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
// 64bit
if (!Subtarget.isXS1A()) {
setOperationAction(ISD::ADD, EVT::i64, Custom);
setOperationAction(ISD::SUB, EVT::i64, Custom);
setOperationAction(ISD::ADD, MVT::i64, Custom);
setOperationAction(ISD::SUB, MVT::i64, Custom);
}
if (Subtarget.isXS1A()) {
setOperationAction(ISD::SMUL_LOHI, EVT::i32, Expand);
setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
}
setOperationAction(ISD::MULHS, EVT::i32, Expand);
setOperationAction(ISD::MULHU, EVT::i32, Expand);
setOperationAction(ISD::SHL_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, EVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, EVT::i32, Expand);
setOperationAction(ISD::MULHS, MVT::i32, Expand);
setOperationAction(ISD::MULHU, MVT::i32, Expand);
setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
// Bit Manipulation
setOperationAction(ISD::CTPOP, EVT::i32, Expand);
setOperationAction(ISD::ROTL , EVT::i32, Expand);
setOperationAction(ISD::ROTR , EVT::i32, Expand);
setOperationAction(ISD::CTPOP, MVT::i32, Expand);
setOperationAction(ISD::ROTL , MVT::i32, Expand);
setOperationAction(ISD::ROTR , MVT::i32, Expand);
setOperationAction(ISD::TRAP, EVT::Other, Legal);
setOperationAction(ISD::TRAP, MVT::Other, Legal);
// Expand jump tables for now
setOperationAction(ISD::BR_JT, EVT::Other, Expand);
setOperationAction(ISD::JumpTable, EVT::i32, Custom);
setOperationAction(ISD::BR_JT, MVT::Other, Expand);
setOperationAction(ISD::JumpTable, MVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, EVT::i32, Custom);
setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
// Thread Local Storage
setOperationAction(ISD::GlobalTLSAddress, EVT::i32, Custom);
setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
// Conversion of i64 -> double produces constantpool nodes
setOperationAction(ISD::ConstantPool, EVT::i32, Custom);
setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
// Loads
setLoadExtAction(ISD::EXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, EVT::i1, Promote);
setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
setLoadExtAction(ISD::SEXTLOAD, EVT::i8, Expand);
setLoadExtAction(ISD::ZEXTLOAD, EVT::i16, Expand);
setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);
// Custom expand misaligned loads / stores.
setOperationAction(ISD::LOAD, EVT::i32, Custom);
setOperationAction(ISD::STORE, EVT::i32, Custom);
setOperationAction(ISD::LOAD, MVT::i32, Custom);
setOperationAction(ISD::STORE, MVT::i32, Custom);
// Varargs
setOperationAction(ISD::VAEND, EVT::Other, Expand);
setOperationAction(ISD::VACOPY, EVT::Other, Expand);
setOperationAction(ISD::VAARG, EVT::Other, Custom);
setOperationAction(ISD::VASTART, EVT::Other, Custom);
setOperationAction(ISD::VAEND, MVT::Other, Expand);
setOperationAction(ISD::VACOPY, MVT::Other, Expand);
setOperationAction(ISD::VAARG, MVT::Other, Custom);
setOperationAction(ISD::VASTART, MVT::Other, Custom);
// Dynamic stack
setOperationAction(ISD::STACKSAVE, EVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, EVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, EVT::i32, Expand);
setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
// Debug
setOperationAction(ISD::DBG_STOPPOINT, EVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, EVT::Other, Expand);
setOperationAction(ISD::DBG_STOPPOINT, MVT::Other, Expand);
setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
maxStoresPerMemset = 4;
maxStoresPerMemmove = maxStoresPerMemcpy = 2;
@ -208,9 +208,9 @@ SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG)
{
DebugLoc dl = Op.getDebugLoc();
SDValue Cond = DAG.getNode(ISD::SETCC, dl, EVT::i32, Op.getOperand(2),
SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
Op.getOperand(3), Op.getOperand(4));
return DAG.getNode(ISD::SELECT, dl, EVT::i32, Cond, Op.getOperand(0),
return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
Op.getOperand(1));
}
@ -220,7 +220,7 @@ getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SelectionDAG &DAG)
// FIXME there is no actual debug info here
DebugLoc dl = GA.getDebugLoc();
if (isa<Function>(GV)) {
return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, EVT::i32, GA);
return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
} else if (!Subtarget.isXS1A()) {
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
if (!GVar) {
@ -230,17 +230,17 @@ getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SelectionDAG &DAG)
}
bool isConst = GVar && GVar->isConstant();
if (isConst) {
return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, EVT::i32, GA);
return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
}
}
return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, EVT::i32, GA);
return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}
SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG)
{
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i32);
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
// If it's a debug information descriptor, don't mess with it.
if (DAG.isVerifiedDebugInfoDesc(Op))
return GA;
@ -248,8 +248,8 @@ LowerGlobalAddress(SDValue Op, SelectionDAG &DAG)
}
static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, EVT::i32,
DAG.getConstant(Intrinsic::xcore_getid, EVT::i32));
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}
static inline bool isZeroLengthArray(const Type *Ty) {
@ -264,7 +264,7 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG)
DebugLoc dl = Op.getDebugLoc();
// transform to label + getid() * size
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
SDValue GA = DAG.getTargetGlobalAddress(GV, EVT::i32);
SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
if (!GVar) {
// If GV is an alias then use the aliasee to determine size
@ -286,9 +286,9 @@ LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG)
SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
const TargetData *TD = TM.getTargetData();
unsigned Size = TD->getTypeAllocSize(Ty);
SDValue offset = DAG.getNode(ISD::MUL, dl, EVT::i32, BuildGetId(DAG, dl),
DAG.getConstant(Size, EVT::i32));
return DAG.getNode(ISD::ADD, dl, EVT::i32, base, offset);
SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
DAG.getConstant(Size, MVT::i32));
return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset);
}
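The lowering above computes label + getid() * size: each thread's copy of a thread-local global sits at the label plus the thread id times the per-thread allocation size. As plain arithmetic (illustrative only; getid stands in for the xcore_getid intrinsic used above):

#include <cstdint>
#include <cassert>

// Illustrative model of "label + getid() * size" from LowerGlobalTLSAddress.
static uintptr_t tlsAddress(uintptr_t Label, unsigned ThreadId, unsigned Size) {
  return Label + uintptr_t(ThreadId) * Size;
}

int main() {
  // A 4-byte thread-local at label 0x1000: thread 0 -> 0x1000, thread 3 -> 0x100C.
  assert(tlsAddress(0x1000, 0, 4) == 0x1000);
  assert(tlsAddress(0x1000, 3, 4) == 0x100C);
  return 0;
}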
SDValue XCoreTargetLowering::
@ -310,7 +310,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG)
Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
CP->getAlignment());
}
return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, EVT::i32, Res);
return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}
}
@ -322,7 +322,7 @@ LowerJumpTable(SDValue Op, SelectionDAG &DAG)
EVT PtrVT = Op.getValueType();
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, EVT::i32, JTI);
return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, JTI);
}
static bool
@ -368,7 +368,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
LoadSDNode *LD = cast<LoadSDNode>(Op);
assert(LD->getExtensionType() == ISD::NON_EXTLOAD && "Unexpected extension type");
assert(LD->getMemoryVT() == EVT::i32 && "Unexpected load EVT");
assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
if (allowsUnalignedMemoryAccesses()) {
return SDValue();
}
@ -397,22 +397,22 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG)
// shr low_shifted, low, (offset & 0x3) * 8
// shl high_shifted, high, 32 - (offset & 0x3) * 8
// or result, low_shifted, high_shifted
SDValue LowOffset = DAG.getConstant(Offset & ~0x3, EVT::i32);
SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, EVT::i32);
SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, EVT::i32);
SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, EVT::i32);
SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32);
SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);
SDValue LowAddr = DAG.getNode(ISD::ADD, dl, EVT::i32, Base, LowOffset);
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, EVT::i32, Base, HighOffset);
SDValue LowAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, LowOffset);
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, HighOffset);
SDValue Low = DAG.getLoad(getPointerTy(), dl, Chain,
LowAddr, NULL, 4);
SDValue High = DAG.getLoad(getPointerTy(), dl, Chain,
HighAddr, NULL, 4);
SDValue LowShifted = DAG.getNode(ISD::SRL, dl, EVT::i32, Low, LowShift);
SDValue HighShifted = DAG.getNode(ISD::SHL, dl, EVT::i32, High, HighShift);
SDValue Result = DAG.getNode(ISD::OR, dl, EVT::i32, LowShifted, HighShifted);
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Low.getValue(1),
SDValue LowShifted = DAG.getNode(ISD::SRL, dl, MVT::i32, Low, LowShift);
SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High, HighShift);
SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, LowShifted, HighShifted);
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
High.getValue(1));
SDValue Ops[] = { Result, Chain };
return DAG.getMergeValues(Ops, 2, dl);
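The shr/shl/or recipe in the comment above reconstructs a misaligned little-endian word from the two aligned words that straddle it. A plain-integer model (illustrative only; it assumes the offset is genuinely misaligned, since aligned offsets never reach this path):

#include <cstdint>
#include <cassert>

// Combine the aligned word below the address (Low) and the aligned word above
// it (High) into the 32-bit value that starts Offset bytes into memory.
static uint32_t combineMisaligned(uint32_t Low, uint32_t High, unsigned Offset) {
  unsigned ByteShift = (Offset & 0x3) * 8;
  assert(ByteShift != 0 && "aligned offsets take the ordinary load path");
  return (Low >> ByteShift) | (High << (32 - ByteShift));
}

int main() {
  // Little-endian memory bytes 00 01 02 03 | 04 05 06 07, loading at offset 2:
  // Low = 0x03020100, High = 0x07060504, expected word = 0x05040302.
  assert(combineMisaligned(0x03020100u, 0x07060504u, 2) == 0x05040302u);
  return 0;
}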
@ -420,18 +420,18 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG)
if (LD->getAlignment() == 2) {
int SVOffset = LD->getSrcValueOffset();
SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, dl, EVT::i32, Chain,
BasePtr, LD->getSrcValue(), SVOffset, EVT::i16,
SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
BasePtr, LD->getSrcValue(), SVOffset, MVT::i16,
LD->isVolatile(), 2);
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, EVT::i32, BasePtr,
DAG.getConstant(2, EVT::i32));
SDValue High = DAG.getExtLoad(ISD::EXTLOAD, dl, EVT::i32, Chain,
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
DAG.getConstant(2, MVT::i32));
SDValue High = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::i32, Chain,
HighAddr, LD->getSrcValue(), SVOffset + 2,
EVT::i16, LD->isVolatile(), 2);
SDValue HighShifted = DAG.getNode(ISD::SHL, dl, EVT::i32, High,
DAG.getConstant(16, EVT::i32));
SDValue Result = DAG.getNode(ISD::OR, dl, EVT::i32, Low, HighShifted);
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other, Low.getValue(1),
MVT::i16, LD->isVolatile(), 2);
SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High,
DAG.getConstant(16, MVT::i32));
SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, Low, HighShifted);
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
High.getValue(1));
SDValue Ops[] = { Result, Chain };
return DAG.getMergeValues(Ops, 2, dl);
@ -464,7 +464,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
StoreSDNode *ST = cast<StoreSDNode>(Op);
assert(!ST->isTruncatingStore() && "Unexpected store type");
assert(ST->getMemoryVT() == EVT::i32 && "Unexpected store EVT");
assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
if (allowsUnalignedMemoryAccesses()) {
return SDValue();
}
@ -482,17 +482,17 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG)
if (ST->getAlignment() == 2) {
int SVOffset = ST->getSrcValueOffset();
SDValue Low = Value;
SDValue High = DAG.getNode(ISD::SRL, dl, EVT::i32, Value,
DAG.getConstant(16, EVT::i32));
SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
DAG.getConstant(16, MVT::i32));
SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
ST->getSrcValue(), SVOffset, EVT::i16,
ST->getSrcValue(), SVOffset, MVT::i16,
ST->isVolatile(), 2);
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, EVT::i32, BasePtr,
DAG.getConstant(2, EVT::i32));
SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
DAG.getConstant(2, MVT::i32));
SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
ST->getSrcValue(), SVOffset + 2,
EVT::i16, ST->isVolatile(), 2);
return DAG.getNode(ISD::TokenFactor, dl, EVT::Other, StoreLow, StoreHigh);
MVT::i16, ST->isVolatile(), 2);
return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
}
// Lower to a call to __misaligned_store(BasePtr, Value).
@ -520,35 +520,35 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG)
SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG)
{
assert(N->getValueType(0) == EVT::i64 &&
assert(N->getValueType(0) == MVT::i64 &&
(N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
"Unknown operand to lower!");
assert(!Subtarget.isXS1A() && "Cannot custom lower ADD/SUB on xs1a");
DebugLoc dl = N->getDebugLoc();
// Extract components
SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
N->getOperand(0), DAG.getConstant(0, EVT::i32));
SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
N->getOperand(0), DAG.getConstant(1, EVT::i32));
SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
N->getOperand(1), DAG.getConstant(0, EVT::i32));
SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, EVT::i32,
N->getOperand(1), DAG.getConstant(1, EVT::i32));
SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
N->getOperand(0), DAG.getConstant(0, MVT::i32));
SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
N->getOperand(0), DAG.getConstant(1, MVT::i32));
SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
N->getOperand(1), DAG.getConstant(0, MVT::i32));
SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
N->getOperand(1), DAG.getConstant(1, MVT::i32));
// Expand
unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
XCoreISD::LSUB;
SDValue Zero = DAG.getConstant(0, EVT::i32);
SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(EVT::i32, EVT::i32),
SDValue Zero = DAG.getConstant(0, MVT::i32);
SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
LHSL, RHSL, Zero);
SDValue Lo(Carry.getNode(), 1);
SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(EVT::i32, EVT::i32),
SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
LHSH, RHSH, Carry);
SDValue Hi(Ignored.getNode(), 1);
// Merge the pieces
return DAG.getNode(ISD::BUILD_PAIR, dl, EVT::i64, Lo, Hi);
return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
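ExpandADDSUB is the textbook multi-word scheme: operate on the low words, thread the carry into the high-word operation, and rebuild the i64 with BUILD_PAIR. A plain-integer model of the ADD case (illustrative only; LSUB threads a borrow the same way):

#include <cstdint>
#include <cassert>

// ladd: 32-bit add with carry-in, producing a 32-bit sum and a carry-out,
// modelling what the LADD node above computes.
static uint32_t ladd(uint32_t A, uint32_t B, uint32_t CarryIn, uint32_t &CarryOut) {
  uint64_t Wide = uint64_t(A) + B + CarryIn;
  CarryOut = uint32_t(Wide >> 32);
  return uint32_t(Wide);
}

static uint64_t add64(uint64_t LHS, uint64_t RHS) {
  uint32_t Carry;
  uint32_t Lo = ladd(uint32_t(LHS), uint32_t(RHS), 0, Carry);
  uint32_t Hi = ladd(uint32_t(LHS >> 32), uint32_t(RHS >> 32), Carry, Carry);
  return (uint64_t(Hi) << 32) | Lo;   // BUILD_PAIR of the two halves
}

int main() {
  assert(add64(0xFFFFFFFFu, 1) == 0x100000000ull);               // carry out of the low word
  assert(add64(0x00000001FFFFFFFFull, 1) == 0x0000000200000000ull);
  return 0;
}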
SDValue XCoreTargetLowering::
@ -580,7 +580,7 @@ LowerVASTART(SDValue Op, SelectionDAG &DAG)
// memory location argument
MachineFunction &MF = DAG.getMachineFunction();
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), EVT::i32);
SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1), SV, 0);
}
@ -594,7 +594,7 @@ SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
MachineFunction &MF = DAG.getMachineFunction();
const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
RegInfo->getFrameRegister(MF), EVT::i32);
RegInfo->getFrameRegister(MF), MVT::i32);
}
//===----------------------------------------------------------------------===//
@ -691,16 +691,16 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
int Offset = VA.getLocMemOffset();
MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, EVT::Other,
MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
Chain, Arg,
DAG.getConstant(Offset/4, EVT::i32)));
DAG.getConstant(Offset/4, MVT::i32)));
}
}
// Transform all store nodes into one single node because
// all store nodes are independent of each other.
if (!MemOpChains.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOpChains[0], MemOpChains.size());
// Build a sequence of copy-to-reg nodes chained together with token
@ -718,15 +718,15 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
// turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
// Likewise ExternalSymbol -> TargetExternalSymbol.
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), EVT::i32);
Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), EVT::i32);
Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
// XCoreBranchLink = #chain, #target_address, #opt_in_flags...
// = Chain, Callee, Reg#1, Reg#2, ...
//
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(EVT::Other, EVT::Flag);
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 8> Ops;
Ops.push_back(Chain);
Ops.push_back(Callee);
@ -842,16 +842,16 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
if (VA.isRegLoc()) {
// Arguments passed in registers
EVT RegVT = VA.getLocVT();
switch (RegVT.getSimpleVT()) {
switch (RegVT.getSimpleVT().SimpleTy) {
default:
{
#ifndef NDEBUG
errs() << "LowerFormalArguments Unhandled argument type: "
<< RegVT.getSimpleVT() << "\n";
<< RegVT.getSimpleVT().SimpleTy << "\n";
#endif
llvm_unreachable(0);
}
case EVT::i32:
case MVT::i32:
unsigned VReg = RegInfo.createVirtualRegister(
XCore::GRRegsRegisterClass);
RegInfo.addLiveIn(VA.getLocReg(), VReg);
@ -864,7 +864,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
if (ObjSize > StackSlotSize) {
errs() << "LowerFormalArguments Unhandled argument type: "
<< VA.getLocVT().getSimpleVT()
<< (unsigned)VA.getLocVT().getSimpleVT().SimpleTy
<< "\n";
}
// Create the frame index object for this incoming parameter...
@ -873,7 +873,7 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
// Create the SelectionDAG nodes corresponding to a load
// from this parameter
SDValue FIN = DAG.getFrameIndex(FI, EVT::i32);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, NULL, 0));
}
}
@ -898,18 +898,18 @@ XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
XFI->setVarArgsFrameIndex(FI);
}
offset -= StackSlotSize;
SDValue FIN = DAG.getFrameIndex(FI, EVT::i32);
SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
// Move argument from phys reg -> virt reg
unsigned VReg = RegInfo.createVirtualRegister(
XCore::GRRegsRegisterClass);
RegInfo.addLiveIn(ArgRegs[i], VReg);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, EVT::i32);
SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
// Move argument from virt reg -> stack
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
MemOps.push_back(Store);
}
if (!MemOps.empty())
Chain = DAG.getNode(ISD::TokenFactor, dl, EVT::Other,
Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
&MemOps[0], MemOps.size());
} else {
// This will point to the next argument passed via stack.
@ -967,11 +967,11 @@ XCoreTargetLowering::LowerReturn(SDValue Chain,
// Return on XCore is always a "retsp 0"
if (Flag.getNode())
return DAG.getNode(XCoreISD::RETSP, dl, EVT::Other,
Chain, DAG.getConstant(0, EVT::i32), Flag);
return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
Chain, DAG.getConstant(0, MVT::i32), Flag);
else // Return Void
return DAG.getNode(XCoreISD::RETSP, dl, EVT::Other,
Chain, DAG.getConstant(0, EVT::i32));
return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
Chain, DAG.getConstant(0, MVT::i32));
}
//===----------------------------------------------------------------------===//
@ -1072,7 +1072,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
LD->getBasePtr(),
DAG.getConstant(StoreBits/8, EVT::i32),
DAG.getConstant(StoreBits/8, MVT::i32),
Alignment, ST->getSrcValue(),
ST->getSrcValueOffset(), LD->getSrcValue(),
LD->getSrcValueOffset());
@ -82,7 +82,7 @@ unsigned EVT::getExtendedSizeInBits() const {
/// getEVTString - This function returns value type as a string, e.g. "i32".
std::string EVT::getEVTString() const {
switch (V) {
switch (V.SimpleTy) {
default:
if (isVector())
return "v" + utostr(getVectorNumElements()) +
@ -91,40 +91,40 @@ std::string EVT::getEVTString() const {
return "i" + utostr(getSizeInBits());
llvm_unreachable("Invalid EVT!");
return "?";
case EVT::i1: return "i1";
case EVT::i8: return "i8";
case EVT::i16: return "i16";
case EVT::i32: return "i32";
case EVT::i64: return "i64";
case EVT::i128: return "i128";
case EVT::f32: return "f32";
case EVT::f64: return "f64";
case EVT::f80: return "f80";
case EVT::f128: return "f128";
case EVT::ppcf128: return "ppcf128";
case EVT::isVoid: return "isVoid";
case EVT::Other: return "ch";
case EVT::Flag: return "flag";
case EVT::v2i8: return "v2i8";
case EVT::v4i8: return "v4i8";
case EVT::v8i8: return "v8i8";
case EVT::v16i8: return "v16i8";
case EVT::v32i8: return "v32i8";
case EVT::v2i16: return "v2i16";
case EVT::v4i16: return "v4i16";
case EVT::v8i16: return "v8i16";
case EVT::v16i16: return "v16i16";
case EVT::v2i32: return "v2i32";
case EVT::v4i32: return "v4i32";
case EVT::v8i32: return "v8i32";
case EVT::v1i64: return "v1i64";
case EVT::v2i64: return "v2i64";
case EVT::v4i64: return "v4i64";
case EVT::v2f32: return "v2f32";
case EVT::v4f32: return "v4f32";
case EVT::v8f32: return "v8f32";
case EVT::v2f64: return "v2f64";
case EVT::v4f64: return "v4f64";
case MVT::i1: return "i1";
case MVT::i8: return "i8";
case MVT::i16: return "i16";
case MVT::i32: return "i32";
case MVT::i64: return "i64";
case MVT::i128: return "i128";
case MVT::f32: return "f32";
case MVT::f64: return "f64";
case MVT::f80: return "f80";
case MVT::f128: return "f128";
case MVT::ppcf128: return "ppcf128";
case MVT::isVoid: return "isVoid";
case MVT::Other: return "ch";
case MVT::Flag: return "flag";
case MVT::v2i8: return "v2i8";
case MVT::v4i8: return "v4i8";
case MVT::v8i8: return "v8i8";
case MVT::v16i8: return "v16i8";
case MVT::v32i8: return "v32i8";
case MVT::v2i16: return "v2i16";
case MVT::v4i16: return "v4i16";
case MVT::v8i16: return "v8i16";
case MVT::v16i16: return "v16i16";
case MVT::v2i32: return "v2i32";
case MVT::v4i32: return "v4i32";
case MVT::v8i32: return "v8i32";
case MVT::v1i64: return "v1i64";
case MVT::v2i64: return "v2i64";
case MVT::v4i64: return "v4i64";
case MVT::v2f32: return "v2f32";
case MVT::v4f32: return "v4f32";
case MVT::v8f32: return "v8f32";
case MVT::v2f64: return "v2f64";
case MVT::v4f64: return "v4f64";
}
}
@ -132,64 +132,64 @@ std::string EVT::getEVTString() const {
/// specified EVT. For integer types, this returns an unsigned type. Note
/// that this will abort for types that cannot be represented.
const Type *EVT::getTypeForEVT() const {
switch (V) {
switch (V.SimpleTy) {
default:
assert(isExtended() && "Type is not extended!");
return LLVMTy;
case EVT::isVoid: return Type::VoidTy;
case EVT::i1: return Type::Int1Ty;
case EVT::i8: return Type::Int8Ty;
case EVT::i16: return Type::Int16Ty;
case EVT::i32: return Type::Int32Ty;
case EVT::i64: return Type::Int64Ty;
case EVT::i128: return IntegerType::get(128);
case EVT::f32: return Type::FloatTy;
case EVT::f64: return Type::DoubleTy;
case EVT::f80: return Type::X86_FP80Ty;
case EVT::f128: return Type::FP128Ty;
case EVT::ppcf128: return Type::PPC_FP128Ty;
case EVT::v2i8: return VectorType::get(Type::Int8Ty, 2);
case EVT::v4i8: return VectorType::get(Type::Int8Ty, 4);
case EVT::v8i8: return VectorType::get(Type::Int8Ty, 8);
case EVT::v16i8: return VectorType::get(Type::Int8Ty, 16);
case EVT::v32i8: return VectorType::get(Type::Int8Ty, 32);
case EVT::v2i16: return VectorType::get(Type::Int16Ty, 2);
case EVT::v4i16: return VectorType::get(Type::Int16Ty, 4);
case EVT::v8i16: return VectorType::get(Type::Int16Ty, 8);
case EVT::v16i16: return VectorType::get(Type::Int16Ty, 16);
case EVT::v2i32: return VectorType::get(Type::Int32Ty, 2);
case EVT::v4i32: return VectorType::get(Type::Int32Ty, 4);
case EVT::v8i32: return VectorType::get(Type::Int32Ty, 8);
case EVT::v1i64: return VectorType::get(Type::Int64Ty, 1);
case EVT::v2i64: return VectorType::get(Type::Int64Ty, 2);
case EVT::v4i64: return VectorType::get(Type::Int64Ty, 4);
case EVT::v2f32: return VectorType::get(Type::FloatTy, 2);
case EVT::v4f32: return VectorType::get(Type::FloatTy, 4);
case EVT::v8f32: return VectorType::get(Type::FloatTy, 8);
case EVT::v2f64: return VectorType::get(Type::DoubleTy, 2);
case EVT::v4f64: return VectorType::get(Type::DoubleTy, 4);
case MVT::isVoid: return Type::VoidTy;
case MVT::i1: return Type::Int1Ty;
case MVT::i8: return Type::Int8Ty;
case MVT::i16: return Type::Int16Ty;
case MVT::i32: return Type::Int32Ty;
case MVT::i64: return Type::Int64Ty;
case MVT::i128: return IntegerType::get(128);
case MVT::f32: return Type::FloatTy;
case MVT::f64: return Type::DoubleTy;
case MVT::f80: return Type::X86_FP80Ty;
case MVT::f128: return Type::FP128Ty;
case MVT::ppcf128: return Type::PPC_FP128Ty;
case MVT::v2i8: return VectorType::get(Type::Int8Ty, 2);
case MVT::v4i8: return VectorType::get(Type::Int8Ty, 4);
case MVT::v8i8: return VectorType::get(Type::Int8Ty, 8);
case MVT::v16i8: return VectorType::get(Type::Int8Ty, 16);
case MVT::v32i8: return VectorType::get(Type::Int8Ty, 32);
case MVT::v2i16: return VectorType::get(Type::Int16Ty, 2);
case MVT::v4i16: return VectorType::get(Type::Int16Ty, 4);
case MVT::v8i16: return VectorType::get(Type::Int16Ty, 8);
case MVT::v16i16: return VectorType::get(Type::Int16Ty, 16);
case MVT::v2i32: return VectorType::get(Type::Int32Ty, 2);
case MVT::v4i32: return VectorType::get(Type::Int32Ty, 4);
case MVT::v8i32: return VectorType::get(Type::Int32Ty, 8);
case MVT::v1i64: return VectorType::get(Type::Int64Ty, 1);
case MVT::v2i64: return VectorType::get(Type::Int64Ty, 2);
case MVT::v4i64: return VectorType::get(Type::Int64Ty, 4);
case MVT::v2f32: return VectorType::get(Type::FloatTy, 2);
case MVT::v4f32: return VectorType::get(Type::FloatTy, 4);
case MVT::v8f32: return VectorType::get(Type::FloatTy, 8);
case MVT::v2f64: return VectorType::get(Type::DoubleTy, 2);
case MVT::v4f64: return VectorType::get(Type::DoubleTy, 4);
}
}
/// getEVT - Return the value type corresponding to the specified type. This
/// returns all pointers as EVT::iPTR. If HandleUnknown is true, unknown types
/// returns all pointers as MVT::iPTR. If HandleUnknown is true, unknown types
/// are returned as Other, otherwise they are invalid.
EVT EVT::getEVT(const Type *Ty, bool HandleUnknown){
switch (Ty->getTypeID()) {
default:
if (HandleUnknown) return EVT::Other;
if (HandleUnknown) return MVT(MVT::Other);
llvm_unreachable("Unknown type!");
return EVT::isVoid;
return MVT(MVT::isVoid);
case Type::VoidTyID:
return EVT::isVoid;
return MVT(MVT::isVoid);
case Type::IntegerTyID:
return getIntegerVT(cast<IntegerType>(Ty)->getBitWidth());
case Type::FloatTyID: return EVT::f32;
case Type::DoubleTyID: return EVT::f64;
case Type::X86_FP80TyID: return EVT::f80;
case Type::FP128TyID: return EVT::f128;
case Type::PPC_FP128TyID: return EVT::ppcf128;
case Type::PointerTyID: return EVT::iPTR;
case Type::FloatTyID: return MVT(MVT::f32);
case Type::DoubleTyID: return MVT(MVT::f64);
case Type::X86_FP80TyID: return MVT(MVT::f80);
case Type::FP128TyID: return MVT(MVT::f128);
case Type::PPC_FP128TyID: return MVT(MVT::ppcf128);
case Type::PointerTyID: return MVT(MVT::iPTR);
case Type::VectorTyID: {
const VectorType *VTy = cast<VectorType>(Ty);
return getVectorVT(getEVT(VTy->getElementType(), false),

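Taken together, the hunks above fix the shape of the split: the primitive enumerators now live on MVT, EVT carries an MVT whose enumerator is exposed as SimpleTy, and an MVT (or a bare MVT::SimpleValueType) is wrapped wherever an EVT used to be returned. A minimal client-side sketch of that idiom, using only members visible in this diff (getSimpleVT(), SimpleTy, isExtended(), and the EVT-from-MVT construction in the returns above); the helper names are illustrative:

#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Type.h"
using namespace llvm;

// Sketch only: dispatch on the primitive enumerator carried inside an EVT.
static const Type *simpleTypeFor(EVT VT) {
  if (VT.isExtended())
    return 0;                            // extended types keep using the LLVMTy path
  switch (VT.getSimpleVT().SimpleTy) {   // the MVT::SimpleValueType lives in SimpleTy
  case MVT::i32: return Type::Int32Ty;
  case MVT::f64: return Type::DoubleTy;
  default:       return 0;
  }
}

// Sketch only: a bare enumerator is wrapped where an EVT is expected.
static EVT pointerTypeOr(EVT Fallback, bool IsPointer) {
  return IsPointer ? EVT(MVT::iPTR) : Fallback;
}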

@ -1559,7 +1559,7 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
return false;
}
}
} else if (VT == EVT::iAny) {
} else if (VT == MVT::iAny) {
if (!EltTy->isInteger()) {
CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not "
"an integer type.", F);
@ -1584,7 +1584,7 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
}
break;
}
} else if (VT == EVT::fAny) {
} else if (VT == MVT::fAny) {
if (!EltTy->isFloatingPoint()) {
CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not "
"a floating-point type.", F);
@ -1597,19 +1597,19 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
Suffix += "v" + utostr(NumElts);
Suffix += EVT::getEVT(EltTy).getEVTString();
} else if (VT == EVT::vAny) {
} else if (VT == MVT::vAny) {
if (!VTy) {
CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not a vector type.", F);
return false;
}
Suffix += ".v" + utostr(NumElts) + EVT::getEVT(EltTy).getEVTString();
} else if (VT == EVT::iPTR) {
} else if (VT == MVT::iPTR) {
if (!isa<PointerType>(Ty)) {
CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is not a "
"pointer and a pointer is required.", F);
return false;
}
} else if (VT == EVT::iPTRAny) {
} else if (VT == MVT::iPTRAny) {
// Outside of TableGen, we don't distinguish iPTRAny (to any address space)
// and iPTR. In the verifier, we can not distinguish which case we have so
// allow either case to be legal.
@ -1621,8 +1621,8 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
"pointer and a pointer is required.", F);
return false;
}
} else if (EVT((EVT::SimpleValueType)VT).isVector()) {
EVT VVT = EVT((EVT::SimpleValueType)VT);
} else if (EVT((MVT::SimpleValueType)VT).isVector()) {
EVT VVT = EVT((MVT::SimpleValueType)VT);
// If this is a vector argument, verify the number and type of elements.
if (VVT.getVectorElementType() != EVT::getEVT(EltTy)) {
@ -1635,7 +1635,7 @@ bool Verifier::PerformTypeCheck(Intrinsic::ID ID, Function *F, const Type *Ty,
"vector elements!", F);
return false;
}
} else if (EVT((EVT::SimpleValueType)VT).getTypeForEVT() != EltTy) {
} else if (EVT((MVT::SimpleValueType)VT).getTypeForEVT() != EltTy) {
CheckFailed(IntrinsicParam(ArgNo, NumRets) + " is wrong!", F);
return false;
} else if (EltTy != Ty) {
@ -1677,7 +1677,7 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
}
for (unsigned ArgNo = 0; ArgNo < RetNum; ++ArgNo) {
int VT = va_arg(VA, int); // An EVT::SimpleValueType when non-negative.
int VT = va_arg(VA, int); // An MVT::SimpleValueType when non-negative.
if (ST) Ty = ST->getElementType(ArgNo);
@ -1687,9 +1687,9 @@ void Verifier::VerifyIntrinsicPrototype(Intrinsic::ID ID, Function *F,
// Verify the parameter types.
for (unsigned ArgNo = 0; ArgNo < ParamNum; ++ArgNo) {
int VT = va_arg(VA, int); // An EVT::SimpleValueType when non-negative.
int VT = va_arg(VA, int); // An MVT::SimpleValueType when non-negative.
if (VT == EVT::isVoid && ArgNo > 0) {
if (VT == MVT::isVoid && ArgNo > 0) {
if (!FTy->isVarArg())
CheckFailed("Intrinsic prototype has no '...'!", F);
break;

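The verifier keeps walking the variadic signature table as plain ints; only the namespace that interprets them changes. A hedged sketch of that idiom, built from the constructor and queries visible above (the helper name is illustrative):

#include "llvm/CodeGen/ValueTypes.h"
using namespace llvm;

// Sketch only: a non-negative table entry is reinterpreted as an
// MVT::SimpleValueType and wrapped in an EVT for the actual type queries.
static bool describesIntegerVector(int RawVT) {
  EVT VVT((MVT::SimpleValueType)RawVT);
  return VVT.isVector() && VVT.getVectorElementType().isInteger();
}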

@ -27,9 +27,9 @@ using namespace llvm;
/// FilterVTs - Filter a list of VT's according to a predicate.
///
template<typename T>
static std::vector<EVT::SimpleValueType>
FilterVTs(const std::vector<EVT::SimpleValueType> &InVTs, T Filter) {
std::vector<EVT::SimpleValueType> Result;
static std::vector<MVT::SimpleValueType>
FilterVTs(const std::vector<MVT::SimpleValueType> &InVTs, T Filter) {
std::vector<MVT::SimpleValueType> Result;
for (unsigned i = 0, e = InVTs.size(); i != e; ++i)
if (Filter(InVTs[i]))
Result.push_back(InVTs[i]);
@ -41,28 +41,28 @@ static std::vector<unsigned char>
FilterEVTs(const std::vector<unsigned char> &InVTs, T Filter) {
std::vector<unsigned char> Result;
for (unsigned i = 0, e = InVTs.size(); i != e; ++i)
if (Filter((EVT::SimpleValueType)InVTs[i]))
if (Filter((MVT::SimpleValueType)InVTs[i]))
Result.push_back(InVTs[i]);
return Result;
}
static std::vector<unsigned char>
ConvertVTs(const std::vector<EVT::SimpleValueType> &InVTs) {
ConvertVTs(const std::vector<MVT::SimpleValueType> &InVTs) {
std::vector<unsigned char> Result;
for (unsigned i = 0, e = InVTs.size(); i != e; ++i)
Result.push_back(InVTs[i]);
return Result;
}
static inline bool isInteger(EVT::SimpleValueType VT) {
static inline bool isInteger(MVT::SimpleValueType VT) {
return EVT(VT).isInteger();
}
static inline bool isFloatingPoint(EVT::SimpleValueType VT) {
static inline bool isFloatingPoint(MVT::SimpleValueType VT) {
return EVT(VT).isFloatingPoint();
}
static inline bool isVector(EVT::SimpleValueType VT) {
static inline bool isVector(MVT::SimpleValueType VT) {
return EVT(VT).isVector();
}
@ -261,11 +261,11 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
return NodeToApply->UpdateNodeType(x.SDTCisVT_Info.VT, TP);
case SDTCisPtrTy: {
// Operand must be same as target pointer type.
return NodeToApply->UpdateNodeType(EVT::iPTR, TP);
return NodeToApply->UpdateNodeType(MVT::iPTR, TP);
}
case SDTCisInt: {
// If there is only one integer type supported, this must be it.
std::vector<EVT::SimpleValueType> IntVTs =
std::vector<MVT::SimpleValueType> IntVTs =
FilterVTs(CGT.getLegalValueTypes(), isInteger);
// If we found exactly one supported integer type, apply it.
@ -275,7 +275,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
}
case SDTCisFP: {
// If there is only one FP type supported, this must be it.
std::vector<EVT::SimpleValueType> FPVTs =
std::vector<MVT::SimpleValueType> FPVTs =
FilterVTs(CGT.getLegalValueTypes(), isFloatingPoint);
// If we found exactly one supported FP type, apply it.
@ -297,7 +297,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
!static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef()
->isSubClassOf("ValueType"))
TP.error(N->getOperator()->getName() + " expects a VT operand!");
EVT::SimpleValueType VT =
MVT::SimpleValueType VT =
getValueType(static_cast<DefInit*>(NodeToApply->getLeafValue())->getDef());
if (!isInteger(VT))
TP.error(N->getOperator()->getName() + " VT operand must be integer!");
@ -314,7 +314,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
// types at this point.
assert(OtherNode->getExtTypes().size() == 1 && "Node has too many types!");
if (OtherNode->hasTypeSet() && OtherNode->getTypeNum(0) <= VT)
OtherNode->UpdateNodeType(EVT::Other, TP); // Throw an error.
OtherNode->UpdateNodeType(MVT::Other, TP); // Throw an error.
return false;
}
case SDTCisOpSmallerThanOp: {
@ -341,7 +341,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
else if (EEVT::isExtFloatingPointInVTs(BigOperand->getExtTypes()))
MadeChange |= NodeToApply->UpdateNodeType(EEVT::isFP, TP);
std::vector<EVT::SimpleValueType> VTs = CGT.getLegalValueTypes();
std::vector<MVT::SimpleValueType> VTs = CGT.getLegalValueTypes();
if (EEVT::isExtIntegerInVTs(NodeToApply->getExtTypes())) {
VTs = FilterVTs(VTs, isInteger);
@ -356,7 +356,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
case 0: break; // No info yet.
case 1:
// Only one VT of this flavor. Cannot ever satisfy the constraints.
return NodeToApply->UpdateNodeType(EVT::Other, TP); // throw
return NodeToApply->UpdateNodeType(MVT::Other, TP); // throw
case 2:
// If we have exactly two possible types, the little operand must be the
// small one, the big operand should be the big one. Common with
@ -377,7 +377,7 @@ bool SDTypeConstraint::ApplyTypeConstraint(TreePatternNode *N,
TP.error(N->getOperator()->getName() + " VT operand must be a vector!");
EVT IVT = OtherOperand->getTypeNum(0);
IVT = IVT.getVectorElementType();
return NodeToApply->UpdateNodeType(IVT.getSimpleVT(), TP);
return NodeToApply->UpdateNodeType(IVT.getSimpleVT().SimpleTy, TP);
}
return false;
}
@ -459,8 +459,8 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
return true;
}
if (getExtTypeNum(0) == EVT::iPTR || getExtTypeNum(0) == EVT::iPTRAny) {
if (ExtVTs[0] == EVT::iPTR || ExtVTs[0] == EVT::iPTRAny ||
if (getExtTypeNum(0) == MVT::iPTR || getExtTypeNum(0) == MVT::iPTRAny) {
if (ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny ||
ExtVTs[0] == EEVT::isInt)
return false;
if (EEVT::isExtIntegerInVTs(ExtVTs)) {
@ -472,7 +472,7 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
}
}
if ((ExtVTs[0] == EEVT::isInt || ExtVTs[0] == EVT::iAny) &&
if ((ExtVTs[0] == EEVT::isInt || ExtVTs[0] == MVT::iAny) &&
EEVT::isExtIntegerInVTs(getExtTypes())) {
assert(hasTypeSet() && "should be handled above!");
std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isInteger);
@ -481,7 +481,7 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
setTypes(FVTs);
return true;
}
if ((ExtVTs[0] == EVT::iPTR || ExtVTs[0] == EVT::iPTRAny) &&
if ((ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny) &&
EEVT::isExtIntegerInVTs(getExtTypes())) {
//assert(hasTypeSet() && "should be handled above!");
std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isInteger);
@ -492,7 +492,7 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
return true;
}
}
if ((ExtVTs[0] == EEVT::isFP || ExtVTs[0] == EVT::fAny) &&
if ((ExtVTs[0] == EEVT::isFP || ExtVTs[0] == MVT::fAny) &&
EEVT::isExtFloatingPointInVTs(getExtTypes())) {
assert(hasTypeSet() && "should be handled above!");
std::vector<unsigned char> FVTs =
@ -502,7 +502,7 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
setTypes(FVTs);
return true;
}
if (ExtVTs[0] == EVT::vAny && EEVT::isExtVectorInVTs(getExtTypes())) {
if (ExtVTs[0] == MVT::vAny && EEVT::isExtVectorInVTs(getExtTypes())) {
assert(hasTypeSet() && "should be handled above!");
std::vector<unsigned char> FVTs = FilterEVTs(getExtTypes(), isVector);
if (getExtTypes() == FVTs)
@ -516,17 +516,17 @@ bool TreePatternNode::UpdateNodeType(const std::vector<unsigned char> &ExtVTs,
//
// Similarly, we should probably set the type here to the intersection of
// {isInt|isFP} and ExtVTs
if (((getExtTypeNum(0) == EEVT::isInt || getExtTypeNum(0) == EVT::iAny) &&
if (((getExtTypeNum(0) == EEVT::isInt || getExtTypeNum(0) == MVT::iAny) &&
EEVT::isExtIntegerInVTs(ExtVTs)) ||
((getExtTypeNum(0) == EEVT::isFP || getExtTypeNum(0) == EVT::fAny) &&
((getExtTypeNum(0) == EEVT::isFP || getExtTypeNum(0) == MVT::fAny) &&
EEVT::isExtFloatingPointInVTs(ExtVTs)) ||
(getExtTypeNum(0) == EVT::vAny &&
(getExtTypeNum(0) == MVT::vAny &&
EEVT::isExtVectorInVTs(ExtVTs))) {
setTypes(ExtVTs);
return true;
}
if (getExtTypeNum(0) == EEVT::isInt &&
(ExtVTs[0] == EVT::iPTR || ExtVTs[0] == EVT::iPTRAny)) {
(ExtVTs[0] == MVT::iPTR || ExtVTs[0] == MVT::iPTRAny)) {
setTypes(ExtVTs);
return true;
}
@ -553,16 +553,16 @@ void TreePatternNode::print(raw_ostream &OS) const {
// FIXME: At some point we should handle printing all the value types for
// nodes that are multiply typed.
switch (getExtTypeNum(0)) {
case EVT::Other: OS << ":Other"; break;
case MVT::Other: OS << ":Other"; break;
case EEVT::isInt: OS << ":isInt"; break;
case EEVT::isFP : OS << ":isFP"; break;
case EEVT::isUnknown: ; /*OS << ":?";*/ break;
case EVT::iPTR: OS << ":iPTR"; break;
case EVT::iPTRAny: OS << ":iPTRAny"; break;
case MVT::iPTR: OS << ":iPTR"; break;
case MVT::iPTRAny: OS << ":iPTRAny"; break;
default: {
std::string VTName = llvm::getName(getTypeNum(0));
// Strip off EVT:: prefix if present.
if (VTName.substr(0,5) == "EVT::")
if (VTName.substr(0,5) == "MVT::")
VTName = VTName.substr(5);
OS << ":" << VTName;
break;
@ -744,7 +744,7 @@ static std::vector<unsigned char> getImplicitType(Record *R, bool NotRegisters,
TreePattern &TP) {
// Some common return values
std::vector<unsigned char> Unknown(1, EEVT::isUnknown);
std::vector<unsigned char> Other(1, EVT::Other);
std::vector<unsigned char> Other(1, MVT::Other);
// Check to see if this is a register or a register class...
if (R->isSubClassOf("RegisterClass")) {
@ -771,7 +771,7 @@ static std::vector<unsigned char> getImplicitType(Record *R, bool NotRegisters,
ComplexPat(1, TP.getDAGPatterns().getComplexPattern(R).getValueType());
return ComplexPat;
} else if (R->isSubClassOf("PointerLikeRegClass")) {
Other[0] = EVT::iPTR;
Other[0] = MVT::iPTR;
return Other;
} else if (R->getName() == "node" || R->getName() == "srcvalue" ||
R->getName() == "zero_reg") {
@ -827,12 +827,12 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
// multiple types. Assert here that it does not, so we revisit this
// code when appropriate.
assert(getExtTypes().size() >= 1 && "TreePattern doesn't have a type!");
EVT::SimpleValueType VT = getTypeNum(0);
MVT::SimpleValueType VT = getTypeNum(0);
for (unsigned i = 1, e = getExtTypes().size(); i != e; ++i)
assert(getTypeNum(i) == VT && "TreePattern has too many types!");
VT = getTypeNum(0);
if (VT != EVT::iPTR && VT != EVT::iPTRAny) {
if (VT != MVT::iPTR && VT != MVT::iPTRAny) {
unsigned Size = EVT(VT).getSizeInBits();
// Make sure that the value is representable for this type.
if (Size < 32) {
@ -873,7 +873,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
TP);
MadeChange |= getChild(NC-1)->UpdateNodeType(getChild(i)->getExtTypes(),
TP);
MadeChange |= UpdateNodeType(EVT::isVoid, TP);
MadeChange |= UpdateNodeType(MVT::isVoid, TP);
}
return MadeChange;
} else if (getOperator()->getName() == "implicit" ||
@ -881,7 +881,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
bool MadeChange = false;
for (unsigned i = 0; i < getNumChildren(); ++i)
MadeChange = getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
MadeChange |= UpdateNodeType(EVT::isVoid, TP);
MadeChange |= UpdateNodeType(MVT::isVoid, TP);
return MadeChange;
} else if (getOperator()->getName() == "COPY_TO_REGCLASS") {
bool MadeChange = false;
@ -905,10 +905,10 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
utostr(getNumChildren() - 1) + " operands!");
// Apply type info to the intrinsic ID.
MadeChange |= getChild(0)->UpdateNodeType(EVT::iPTR, TP);
MadeChange |= getChild(0)->UpdateNodeType(MVT::iPTR, TP);
for (unsigned i = NumRetVTs, e = getNumChildren(); i != e; ++i) {
EVT::SimpleValueType OpVT = Int->IS.ParamVTs[i - NumRetVTs];
MVT::SimpleValueType OpVT = Int->IS.ParamVTs[i - NumRetVTs];
MadeChange |= getChild(i)->UpdateNodeType(OpVT, TP);
MadeChange |= getChild(i)->ApplyTypeConstraints(TP, NotRegisters);
}
@ -922,7 +922,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
// Branch, etc. do not produce results and top-level forms in instr pattern
// must have void types.
if (NI.getNumResults() == 0)
MadeChange |= UpdateNodeType(EVT::isVoid, TP);
MadeChange |= UpdateNodeType(MVT::isVoid, TP);
return MadeChange;
} else if (getOperator()->isSubClassOf("Instruction")) {
@ -937,13 +937,13 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
CDP.getTargetInfo().getInstruction(getOperator()->getName());
// Apply the result type to the node
if (NumResults == 0 || InstInfo.NumDefs == 0) {
MadeChange = UpdateNodeType(EVT::isVoid, TP);
MadeChange = UpdateNodeType(MVT::isVoid, TP);
} else {
Record *ResultNode = Inst.getResult(0);
if (ResultNode->isSubClassOf("PointerLikeRegClass")) {
std::vector<unsigned char> VT;
VT.push_back(EVT::iPTR);
VT.push_back(MVT::iPTR);
MadeChange = UpdateNodeType(VT, TP);
} else if (ResultNode->getName() == "unknown") {
std::vector<unsigned char> VT;
@ -976,7 +976,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
TP.error("Instruction '" + getOperator()->getName() +
"' expects more operands than were provided.");
EVT::SimpleValueType VT;
MVT::SimpleValueType VT;
TreePatternNode *Child = getChild(ChildNo++);
if (OperandNode->isSubClassOf("RegisterClass")) {
const CodeGenRegisterClass &RC =
@ -986,7 +986,7 @@ bool TreePatternNode::ApplyTypeConstraints(TreePattern &TP, bool NotRegisters) {
VT = getValueType(OperandNode->getValueAsDef("Type"));
MadeChange |= Child->UpdateNodeType(VT, TP);
} else if (OperandNode->isSubClassOf("PointerLikeRegClass")) {
MadeChange |= Child->UpdateNodeType(EVT::iPTR, TP);
MadeChange |= Child->UpdateNodeType(MVT::iPTR, TP);
} else if (OperandNode->getName() == "unknown") {
MadeChange |= Child->UpdateNodeType(EEVT::isUnknown, TP);
} else {
@ -1227,7 +1227,7 @@ TreePatternNode *TreePattern::ParseTreePattern(DagInit *Dag) {
// If this intrinsic returns void, it must have side-effects and thus a
// chain.
if (Int.IS.RetVTs[0] == EVT::isVoid) {
if (Int.IS.RetVTs[0] == MVT::isVoid) {
Operator = getDAGPatterns().get_intrinsic_void_sdnode();
} else if (Int.ModRef != CodeGenIntrinsic::NoMem) {
// Has side-effects, requires chain.
@ -1585,7 +1585,7 @@ FindPatternInputsAndOutputs(TreePattern *I, TreePatternNode *Pat,
// If this is not a set, verify that the children nodes are not void typed,
// and recurse.
for (unsigned i = 0, e = Pat->getNumChildren(); i != e; ++i) {
if (Pat->getChild(i)->getExtTypeNum(0) == EVT::isVoid)
if (Pat->getChild(i)->getExtTypeNum(0) == MVT::isVoid)
I->error("Cannot have void nodes inside of patterns!");
FindPatternInputsAndOutputs(I, Pat->getChild(i), InstInputs, InstResults,
InstImpInputs, InstImpResults);
@ -1833,7 +1833,7 @@ void CodeGenDAGPatterns::ParseInstructions() {
// fill in the InstResults map.
for (unsigned j = 0, e = I->getNumTrees(); j != e; ++j) {
TreePatternNode *Pat = I->getTree(j);
if (Pat->getExtTypeNum(0) != EVT::isVoid)
if (Pat->getExtTypeNum(0) != MVT::isVoid)
I->error("Top-level forms in instruction pattern should have"
" void types");

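In the pattern type-inference code the renames are mechanical, but the surrounding idiom is worth spelling out: legal-type lists are vectors of MVT::SimpleValueType, predicates such as isInteger wrap a value in EVT to query it, and MVT::Other doubles as the sentinel passed to UpdateNodeType to force a type-contradiction error. A compact sketch of how a constraint applies the unique legal type, reusing the helpers declared above; the function name and exact control flow are illustrative, not quoted from the commit, and the code assumes the CodeGenDAGPatterns.cpp context (FilterVTs, isInteger, CodeGenTarget):

// Sketch only: pin a node to the single legal integer type, if there is one.
static bool applySingleIntegerType(TreePatternNode *NodeToApply,
                                   TreePattern &TP,
                                   const CodeGenTarget &CGT) {
  std::vector<MVT::SimpleValueType> IntVTs =
      FilterVTs(CGT.getLegalValueTypes(), isInteger);
  if (IntVTs.size() == 1)
    return NodeToApply->UpdateNodeType(IntVTs[0], TP);  // exactly one candidate
  return false;                                         // still ambiguous
}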

@ -34,10 +34,10 @@ namespace llvm {
class ComplexPattern;
/// EEVT::DAGISelGenValueType - These are some extended forms of
/// EVT::SimpleValueType that we use as lattice values during type inference.
/// MVT::SimpleValueType that we use as lattice values during type inference.
namespace EEVT {
enum DAGISelGenValueType {
isFP = EVT::LAST_VALUETYPE,
isFP = MVT::LAST_VALUETYPE,
isInt,
isUnknown
};
@ -181,19 +181,19 @@ public:
bool isLeaf() const { return Val != 0; }
bool hasTypeSet() const {
return (Types[0] < EVT::LAST_VALUETYPE) || (Types[0] == EVT::iPTR) ||
(Types[0] == EVT::iPTRAny);
return (Types[0] < MVT::LAST_VALUETYPE) || (Types[0] == MVT::iPTR) ||
(Types[0] == MVT::iPTRAny);
}
bool isTypeCompletelyUnknown() const {
return Types[0] == EEVT::isUnknown;
}
bool isTypeDynamicallyResolved() const {
return (Types[0] == EVT::iPTR) || (Types[0] == EVT::iPTRAny);
return (Types[0] == MVT::iPTR) || (Types[0] == MVT::iPTRAny);
}
EVT::SimpleValueType getTypeNum(unsigned Num) const {
MVT::SimpleValueType getTypeNum(unsigned Num) const {
assert(hasTypeSet() && "Doesn't have a type yet!");
assert(Types.size() > Num && "Type num out of range!");
return (EVT::SimpleValueType)Types[Num];
return (MVT::SimpleValueType)Types[Num];
}
unsigned char getExtTypeNum(unsigned Num) const {
assert(Types.size() > Num && "Extended type num out of range!");

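The EEVT lattice can share storage with concrete types because its enumerators are allocated immediately after MVT::LAST_VALUETYPE, so a single unsigned char entry either names a real simple type or one of the lattice states; that is exactly what the hasTypeSet and isTypeDynamicallyResolved predicates above test. A tiny sketch of the encoding (helper name illustrative):

// Sketch only: one byte per entry, interpreted against the MVT enum ranges.
static bool entryHasTypeSet(unsigned char T) {
  return T < MVT::LAST_VALUETYPE     // an ordinary simple type
      || T == MVT::iPTR              // pointer, resolved per target later
      || T == MVT::iPTRAny;
}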

@ -37,20 +37,20 @@ namespace llvm {
/// continues from there through the parameter list. This is useful for
/// "matching" types.
struct IntrinsicSignature {
/// RetVTs - The EVT::SimpleValueType for each return type. Note that this
/// RetVTs - The MVT::SimpleValueType for each return type. Note that this
/// list is only populated when in the context of a target .td file. When
/// building Intrinsics.td, this isn't available, because we don't know
/// the target pointer size.
std::vector<EVT::SimpleValueType> RetVTs;
std::vector<MVT::SimpleValueType> RetVTs;
/// RetTypeDefs - The records for each return type.
std::vector<Record*> RetTypeDefs;
/// ParamVTs - The EVT::SimpleValueType for each parameter type. Note that
/// ParamVTs - The MVT::SimpleValueType for each parameter type. Note that
/// this list is only populated when in the context of a target .td file.
/// When building Intrinsics.td, this isn't available, because we don't
/// know the target pointer size.
std::vector<EVT::SimpleValueType> ParamVTs;
std::vector<MVT::SimpleValueType> ParamVTs;
/// ParamTypeDefs - The records for each parameter type.
std::vector<Record*> ParamTypeDefs;


@ -36,7 +36,7 @@ namespace llvm {
Record *TheDef;
std::string Namespace;
std::vector<Record*> Elements;
std::vector<EVT::SimpleValueType> VTs;
std::vector<MVT::SimpleValueType> VTs;
unsigned SpillSize;
unsigned SpillAlignment;
int CopyCost;
@ -44,10 +44,10 @@ namespace llvm {
std::string MethodProtos, MethodBodies;
const std::string &getName() const;
const std::vector<EVT::SimpleValueType> &getValueTypes() const {return VTs;}
const std::vector<MVT::SimpleValueType> &getValueTypes() const {return VTs;}
unsigned getNumValueTypes() const { return VTs.size(); }
EVT::SimpleValueType getValueTypeNum(unsigned VTNum) const {
MVT::SimpleValueType getValueTypeNum(unsigned VTNum) const {
if (VTNum < VTs.size())
return VTs[VTNum];
assert(0 && "VTNum greater than number of ValueTypes in RegClass!");


@ -30,63 +30,63 @@ static cl::opt<unsigned>
AsmWriterNum("asmwriternum", cl::init(0),
cl::desc("Make -gen-asm-writer emit assembly writer #N"));
/// getValueType - Return the EVT::SimpleValueType that the specified TableGen
/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
/// record corresponds to.
EVT::SimpleValueType llvm::getValueType(Record *Rec) {
return (EVT::SimpleValueType)Rec->getValueAsInt("Value");
MVT::SimpleValueType llvm::getValueType(Record *Rec) {
return (MVT::SimpleValueType)Rec->getValueAsInt("Value");
}
std::string llvm::getName(EVT::SimpleValueType T) {
std::string llvm::getName(MVT::SimpleValueType T) {
switch (T) {
case EVT::Other: return "UNKNOWN";
case EVT::iPTR: return "TLI.getPointerTy()";
case EVT::iPTRAny: return "TLI.getPointerTy()";
case MVT::Other: return "UNKNOWN";
case MVT::iPTR: return "TLI.getPointerTy()";
case MVT::iPTRAny: return "TLI.getPointerTy()";
default: return getEnumName(T);
}
}
std::string llvm::getEnumName(EVT::SimpleValueType T) {
std::string llvm::getEnumName(MVT::SimpleValueType T) {
switch (T) {
case EVT::Other: return "EVT::Other";
case EVT::i1: return "EVT::i1";
case EVT::i8: return "EVT::i8";
case EVT::i16: return "EVT::i16";
case EVT::i32: return "EVT::i32";
case EVT::i64: return "EVT::i64";
case EVT::i128: return "EVT::i128";
case EVT::iAny: return "EVT::iAny";
case EVT::fAny: return "EVT::fAny";
case EVT::vAny: return "EVT::vAny";
case EVT::f32: return "EVT::f32";
case EVT::f64: return "EVT::f64";
case EVT::f80: return "EVT::f80";
case EVT::f128: return "EVT::f128";
case EVT::ppcf128: return "EVT::ppcf128";
case EVT::Flag: return "EVT::Flag";
case EVT::isVoid:return "EVT::isVoid";
case EVT::v2i8: return "EVT::v2i8";
case EVT::v4i8: return "EVT::v4i8";
case EVT::v8i8: return "EVT::v8i8";
case EVT::v16i8: return "EVT::v16i8";
case EVT::v32i8: return "EVT::v32i8";
case EVT::v2i16: return "EVT::v2i16";
case EVT::v4i16: return "EVT::v4i16";
case EVT::v8i16: return "EVT::v8i16";
case EVT::v16i16: return "EVT::v16i16";
case EVT::v2i32: return "EVT::v2i32";
case EVT::v4i32: return "EVT::v4i32";
case EVT::v8i32: return "EVT::v8i32";
case EVT::v1i64: return "EVT::v1i64";
case EVT::v2i64: return "EVT::v2i64";
case EVT::v4i64: return "EVT::v4i64";
case EVT::v2f32: return "EVT::v2f32";
case EVT::v4f32: return "EVT::v4f32";
case EVT::v8f32: return "EVT::v8f32";
case EVT::v2f64: return "EVT::v2f64";
case EVT::v4f64: return "EVT::v4f64";
case EVT::Metadata: return "EVT::Metadata";
case EVT::iPTR: return "EVT::iPTR";
case EVT::iPTRAny: return "EVT::iPTRAny";
case MVT::Other: return "MVT::Other";
case MVT::i1: return "MVT::i1";
case MVT::i8: return "MVT::i8";
case MVT::i16: return "MVT::i16";
case MVT::i32: return "MVT::i32";
case MVT::i64: return "MVT::i64";
case MVT::i128: return "MVT::i128";
case MVT::iAny: return "MVT::iAny";
case MVT::fAny: return "MVT::fAny";
case MVT::vAny: return "MVT::vAny";
case MVT::f32: return "MVT::f32";
case MVT::f64: return "MVT::f64";
case MVT::f80: return "MVT::f80";
case MVT::f128: return "MVT::f128";
case MVT::ppcf128: return "MVT::ppcf128";
case MVT::Flag: return "MVT::Flag";
case MVT::isVoid:return "MVT::isVoid";
case MVT::v2i8: return "MVT::v2i8";
case MVT::v4i8: return "MVT::v4i8";
case MVT::v8i8: return "MVT::v8i8";
case MVT::v16i8: return "MVT::v16i8";
case MVT::v32i8: return "MVT::v32i8";
case MVT::v2i16: return "MVT::v2i16";
case MVT::v4i16: return "MVT::v4i16";
case MVT::v8i16: return "MVT::v8i16";
case MVT::v16i16: return "MVT::v16i16";
case MVT::v2i32: return "MVT::v2i32";
case MVT::v4i32: return "MVT::v4i32";
case MVT::v8i32: return "MVT::v8i32";
case MVT::v1i64: return "MVT::v1i64";
case MVT::v2i64: return "MVT::v2i64";
case MVT::v4i64: return "MVT::v4i64";
case MVT::v2f32: return "MVT::v2f32";
case MVT::v4f32: return "MVT::v4f32";
case MVT::v8f32: return "MVT::v8f32";
case MVT::v2f64: return "MVT::v2f64";
case MVT::v4f64: return "MVT::v4f64";
case MVT::Metadata: return "MVT::Metadata";
case MVT::iPTR: return "MVT::iPTR";
case MVT::iPTRAny: return "MVT::iPTRAny";
default: assert(0 && "ILLEGAL VALUE TYPE!"); return "";
}
}
@ -191,7 +191,7 @@ std::vector<unsigned char> CodeGenTarget::getRegisterVTs(Record *R) const {
const CodeGenRegisterClass &RC = RegisterClasses[i];
for (unsigned ei = 0, ee = RC.Elements.size(); ei != ee; ++ei) {
if (R == RC.Elements[ei]) {
const std::vector<EVT::SimpleValueType> &InVTs = RC.getValueTypes();
const std::vector<MVT::SimpleValueType> &InVTs = RC.getValueTypes();
for (unsigned i = 0, e = InVTs.size(); i != e; ++i)
Result.push_back(InVTs[i]);
}
@ -477,12 +477,12 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
}
// Parse the list of return types.
std::vector<EVT::SimpleValueType> OverloadedVTs;
std::vector<MVT::SimpleValueType> OverloadedVTs;
ListInit *TypeList = R->getValueAsListInit("RetTypes");
for (unsigned i = 0, e = TypeList->getSize(); i != e; ++i) {
Record *TyEl = TypeList->getElementAsRecord(i);
assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
EVT::SimpleValueType VT;
MVT::SimpleValueType VT;
if (TyEl->isSubClassOf("LLVMMatchType")) {
unsigned MatchTy = TyEl->getValueAsInt("Number");
assert(MatchTy < OverloadedVTs.size() &&
@ -493,7 +493,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
// overloaded, all the types can be specified directly.
assert(((!TyEl->isSubClassOf("LLVMExtendedElementVectorType") &&
!TyEl->isSubClassOf("LLVMTruncatedElementVectorType")) ||
VT == EVT::iAny || VT == EVT::vAny) &&
VT == MVT::iAny || VT == MVT::vAny) &&
"Expected iAny or vAny type");
} else {
VT = getValueType(TyEl->getValueAsDef("VT"));
@ -514,7 +514,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
for (unsigned i = 0, e = TypeList->getSize(); i != e; ++i) {
Record *TyEl = TypeList->getElementAsRecord(i);
assert(TyEl->isSubClassOf("LLVMType") && "Expected a type!");
EVT::SimpleValueType VT;
MVT::SimpleValueType VT;
if (TyEl->isSubClassOf("LLVMMatchType")) {
unsigned MatchTy = TyEl->getValueAsInt("Number");
assert(MatchTy < OverloadedVTs.size() &&
@ -525,7 +525,7 @@ CodeGenIntrinsic::CodeGenIntrinsic(Record *R) {
// overloaded, all the types can be specified directly.
assert(((!TyEl->isSubClassOf("LLVMExtendedElementVectorType") &&
!TyEl->isSubClassOf("LLVMTruncatedElementVectorType")) ||
VT == EVT::iAny || VT == EVT::vAny) &&
VT == MVT::iAny || VT == MVT::vAny) &&
"Expected iAny or vAny type");
} else
VT = getValueType(TyEl->getValueAsDef("VT"));


@ -49,12 +49,12 @@ enum SDNP {
// ComplexPattern attributes.
enum CPAttr { CPAttrParentAsRoot };
/// getValueType - Return the EVT::SimpleValueType that the specified TableGen
/// getValueType - Return the MVT::SimpleValueType that the specified TableGen
/// record corresponds to.
EVT::SimpleValueType getValueType(Record *Rec);
MVT::SimpleValueType getValueType(Record *Rec);
std::string getName(EVT::SimpleValueType T);
std::string getEnumName(EVT::SimpleValueType T);
std::string getName(MVT::SimpleValueType T);
std::string getEnumName(MVT::SimpleValueType T);
/// getQualifiedName - Return the name of the specified record, with a
/// namespace qualifier if the record contains one.
@ -68,7 +68,7 @@ class CodeGenTarget {
mutable std::map<std::string, CodeGenInstruction> Instructions;
mutable std::vector<CodeGenRegister> Registers;
mutable std::vector<CodeGenRegisterClass> RegisterClasses;
mutable std::vector<EVT::SimpleValueType> LegalValueTypes;
mutable std::vector<MVT::SimpleValueType> LegalValueTypes;
void ReadRegisters() const;
void ReadRegisterClasses() const;
void ReadInstructions() const;
@ -172,15 +172,15 @@ public:
/// specified physical register.
std::vector<unsigned char> getRegisterVTs(Record *R) const;
const std::vector<EVT::SimpleValueType> &getLegalValueTypes() const {
const std::vector<MVT::SimpleValueType> &getLegalValueTypes() const {
if (LegalValueTypes.empty()) ReadLegalValueTypes();
return LegalValueTypes;
}
/// isLegalValueType - Return true if the specified value type is natively
/// supported by the target (i.e. there are registers that directly hold it).
bool isLegalValueType(EVT::SimpleValueType VT) const {
const std::vector<EVT::SimpleValueType> &LegalVTs = getLegalValueTypes();
bool isLegalValueType(MVT::SimpleValueType VT) const {
const std::vector<MVT::SimpleValueType> &LegalVTs = getLegalValueTypes();
for (unsigned i = 0, e = LegalVTs.size(); i != e; ++i)
if (LegalVTs[i] == VT) return true;
return false;
@ -222,7 +222,7 @@ public:
/// ComplexPattern - ComplexPattern info, corresponding to the ComplexPattern
/// tablegen class in TargetSelectionDAG.td
class ComplexPattern {
EVT::SimpleValueType Ty;
MVT::SimpleValueType Ty;
unsigned NumOperands;
std::string SelectFunc;
std::vector<Record*> RootNodes;
@ -232,7 +232,7 @@ public:
ComplexPattern() : NumOperands(0) {};
ComplexPattern(Record *R);
EVT::SimpleValueType getValueType() const { return Ty; }
MVT::SimpleValueType getValueType() const { return Ty; }
unsigned getNumOperands() const { return NumOperands; }
const std::string &getSelectFunc() const { return SelectFunc; }
const std::vector<Record*> &getRootNodes() const {


@ -59,10 +59,10 @@ static const ComplexPattern *NodeGetComplexPattern(TreePatternNode *N,
static unsigned getPatternSize(TreePatternNode *P, CodeGenDAGPatterns &CGP) {
assert((EEVT::isExtIntegerInVTs(P->getExtTypes()) ||
EEVT::isExtFloatingPointInVTs(P->getExtTypes()) ||
P->getExtTypeNum(0) == EVT::isVoid ||
P->getExtTypeNum(0) == EVT::Flag ||
P->getExtTypeNum(0) == EVT::iPTR ||
P->getExtTypeNum(0) == EVT::iPTRAny) &&
P->getExtTypeNum(0) == MVT::isVoid ||
P->getExtTypeNum(0) == MVT::Flag ||
P->getExtTypeNum(0) == MVT::iPTR ||
P->getExtTypeNum(0) == MVT::iPTRAny) &&
"Not a valid pattern node to size!");
unsigned Size = 3; // The node itself.
// If the root node is a ConstantSDNode, increases its size.
@ -87,7 +87,7 @@ static unsigned getPatternSize(TreePatternNode *P, CodeGenDAGPatterns &CGP) {
// Count children in the count if they are also nodes.
for (unsigned i = 0, e = P->getNumChildren(); i != e; ++i) {
TreePatternNode *Child = P->getChild(i);
if (!Child->isLeaf() && Child->getExtTypeNum(0) != EVT::Other)
if (!Child->isLeaf() && Child->getExtTypeNum(0) != MVT::Other)
Size += getPatternSize(Child, CGP);
else if (Child->isLeaf()) {
if (dynamic_cast<IntInit*>(Child->getLeafValue()))
@ -174,10 +174,10 @@ struct PatternSortingPredicate {
/// getRegisterValueType - Look up and return the ValueType of the specified
/// register. If the register is a member of multiple register classes which
/// have different associated types, return EVT::Other.
static EVT::SimpleValueType getRegisterValueType(Record *R, const CodeGenTarget &T) {
/// have different associated types, return MVT::Other.
static MVT::SimpleValueType getRegisterValueType(Record *R, const CodeGenTarget &T) {
bool FoundRC = false;
EVT::SimpleValueType VT = EVT::Other;
MVT::SimpleValueType VT = MVT::Other;
const std::vector<CodeGenRegisterClass> &RCs = T.getRegisterClasses();
std::vector<CodeGenRegisterClass>::const_iterator RC;
std::vector<Record*>::const_iterator Element;
@ -191,9 +191,9 @@ static EVT::SimpleValueType getRegisterValueType(Record *R, const CodeGenTarget
} else {
// In multiple RC's
if (VT != (*RC).getValueTypeNum(0)) {
// Types of the RC's do not agree. Return EVT::Other. The
// Types of the RC's do not agree. Return MVT::Other. The
// target is responsible for handling this.
return EVT::Other;
return MVT::Other;
}
}
}
@ -740,7 +740,7 @@ public:
} else if (LeafRec->isSubClassOf("ValueType")) {
// Make sure this is the specified value type.
emitCheck("cast<VTSDNode>(" + RootName +
")->getVT() == EVT::" + LeafRec->getName());
")->getVT() == MVT::" + LeafRec->getName());
} else if (LeafRec->isSubClassOf("CondCode")) {
// Make sure this is the specified cond code.
emitCheck("cast<CondCodeSDNode>(" + RootName +
@ -813,11 +813,11 @@ public:
errs() << "Cannot handle " << getEnumName(N->getTypeNum(0))
<< " type as an immediate constant. Aborting\n";
abort();
case EVT::i1: CastType = "bool"; break;
case EVT::i8: CastType = "unsigned char"; break;
case EVT::i16: CastType = "unsigned short"; break;
case EVT::i32: CastType = "unsigned"; break;
case EVT::i64: CastType = "uint64_t"; break;
case MVT::i1: CastType = "bool"; break;
case MVT::i8: CastType = "unsigned char"; break;
case MVT::i16: CastType = "unsigned short"; break;
case MVT::i32: CastType = "unsigned"; break;
case MVT::i64: CastType = "uint64_t"; break;
}
emitCode("SDValue " + TmpVar +
" = CurDAG->getTargetConstant(((" + CastType +
@ -921,7 +921,7 @@ public:
emitCode("SDValue Tmp" + utostr(ResNo) +
" = CurDAG->getTargetConstant(" +
getQualifiedName(DI->getDef()) + "RegClassID, " +
"EVT::i32);");
"MVT::i32);");
NodeOps.push_back("Tmp" + utostr(ResNo));
return NodeOps;
}
@ -979,7 +979,7 @@ public:
if (NodeHasOptInFlag) {
emitCode("bool HasInFlag = "
"(N.getOperand(N.getNumOperands()-1).getValueType() == EVT::Flag);");
"(N.getOperand(N.getNumOperands()-1).getValueType() == MVT::Flag);");
}
if (IsVariadic)
emitCode("SmallVector<SDValue, 8> Ops" + utostr(OpcNo) + ";");
@ -987,8 +987,8 @@ public:
// How many results is this pattern expected to produce?
unsigned NumPatResults = 0;
for (unsigned i = 0, e = Pattern->getExtTypes().size(); i != e; i++) {
EVT::SimpleValueType VT = Pattern->getTypeNum(i);
if (VT != EVT::isVoid && VT != EVT::Flag)
MVT::SimpleValueType VT = Pattern->getTypeNum(i);
if (VT != MVT::isVoid && VT != MVT::Flag)
NumPatResults++;
}
@ -1007,7 +1007,7 @@ public:
}
emitCode("InChains.push_back(" + ChainName + ");");
emitCode(ChainName + " = CurDAG->getNode(ISD::TokenFactor, "
"N.getDebugLoc(), EVT::Other, "
"N.getDebugLoc(), MVT::Other, "
"&InChains[0], InChains.size());");
if (GenDebug) {
emitCode("CurDAG->setSubgraphColor(" + ChainName +".getNode(), \"yellow\");");
@ -1096,7 +1096,7 @@ public:
// Output order: results, chain, flags
// Result types.
if (NumResults > 0 && N->getTypeNum(0) != EVT::isVoid) {
if (NumResults > 0 && N->getTypeNum(0) != MVT::isVoid) {
Code += ", VT" + utostr(VTNo);
emitVT(getEnumName(N->getTypeNum(0)));
}
@ -1105,14 +1105,14 @@ public:
for (unsigned i = 0; i < NumDstRegs; i++) {
Record *RR = DstRegs[i];
if (RR->isSubClassOf("Register")) {
EVT::SimpleValueType RVT = getRegisterValueType(RR, CGT);
MVT::SimpleValueType RVT = getRegisterValueType(RR, CGT);
Code += ", " + getEnumName(RVT);
}
}
if (NodeHasChain)
Code += ", EVT::Other";
Code += ", MVT::Other";
if (NodeHasOutFlag)
Code += ", EVT::Flag";
Code += ", MVT::Flag";
// Inputs.
if (IsVariadic) {
@ -1405,8 +1405,8 @@ private:
Record *RR = DI->getDef();
if (RR->isSubClassOf("Register")) {
EVT::SimpleValueType RVT = getRegisterValueType(RR, T);
if (RVT == EVT::Flag) {
MVT::SimpleValueType RVT = getRegisterValueType(RR, T);
if (RVT == MVT::Flag) {
if (!InFlagDecled) {
emitCode("SDValue InFlag = " + RootName + utostr(OpNo) + ";");
InFlagDecled = true;
@ -1707,7 +1707,7 @@ void DAGISelEmitter::EmitInstructionSelector(raw_ostream &OS) {
assert(!PatternsOfOp.empty() && "No patterns but map has entry?");
// Split them into groups by type.
std::map<EVT::SimpleValueType,
std::map<MVT::SimpleValueType,
std::vector<const PatternToMatch*> > PatternsByType;
for (unsigned i = 0, e = PatternsOfOp.size(); i != e; ++i) {
const PatternToMatch *Pat = PatternsOfOp[i];
@ -1715,11 +1715,11 @@ void DAGISelEmitter::EmitInstructionSelector(raw_ostream &OS) {
PatternsByType[SrcPat->getTypeNum(0)].push_back(Pat);
}
for (std::map<EVT::SimpleValueType,
for (std::map<MVT::SimpleValueType,
std::vector<const PatternToMatch*> >::iterator
II = PatternsByType.begin(), EE = PatternsByType.end(); II != EE;
++II) {
EVT::SimpleValueType OpVT = II->first;
MVT::SimpleValueType OpVT = II->first;
std::vector<const PatternToMatch*> &Patterns = II->second;
typedef std::pair<unsigned, std::string> CodeLine;
typedef std::vector<CodeLine> CodeList;
@ -1839,16 +1839,16 @@ void DAGISelEmitter::EmitInstructionSelector(raw_ostream &OS) {
// Print function.
std::string OpVTStr;
if (OpVT == EVT::iPTR) {
if (OpVT == MVT::iPTR) {
OpVTStr = "_iPTR";
} else if (OpVT == EVT::iPTRAny) {
} else if (OpVT == MVT::iPTRAny) {
OpVTStr = "_iPTRAny";
} else if (OpVT == EVT::isVoid) {
} else if (OpVT == MVT::isVoid) {
// Nodes with a void result actually have a first result type of either
// Other (a chain) or Flag. Since there is no one-to-one mapping from
// void to this case, we handle it specially here.
} else {
OpVTStr = "_" + getEnumName(OpVT).substr(5); // Skip 'EVT::'
OpVTStr = "_" + getEnumName(OpVT).substr(5); // Skip 'MVT::'
}
std::map<std::string, std::vector<std::string> >::iterator OpVTI =
OpcodeVTMap.find(OpName);
@ -1929,8 +1929,8 @@ void DAGISelEmitter::EmitInstructionSelector(raw_ostream &OS) {
<< " SelectInlineAsmMemoryOperands(Ops);\n\n"
<< " std::vector<EVT> VTs;\n"
<< " VTs.push_back(EVT::Other);\n"
<< " VTs.push_back(EVT::Flag);\n"
<< " VTs.push_back(MVT::Other);\n"
<< " VTs.push_back(MVT::Flag);\n"
<< " SDValue New = CurDAG->getNode(ISD::INLINEASM, N.getDebugLoc(), "
"VTs, &Ops[0], Ops.size());\n"
<< " return New.getNode();\n"
@ -1944,17 +1944,17 @@ void DAGISelEmitter::EmitInstructionSelector(raw_ostream &OS) {
OS << "SDNode *Select_DBG_LABEL(const SDValue &N) {\n"
<< " SDValue Chain = N.getOperand(0);\n"
<< " unsigned C = cast<LabelSDNode>(N)->getLabelID();\n"
<< " SDValue Tmp = CurDAG->getTargetConstant(C, EVT::i32);\n"
<< " SDValue Tmp = CurDAG->getTargetConstant(C, MVT::i32);\n"
<< " return CurDAG->SelectNodeTo(N.getNode(), TargetInstrInfo::DBG_LABEL,\n"
<< " EVT::Other, Tmp, Chain);\n"
<< " MVT::Other, Tmp, Chain);\n"
<< "}\n\n";
OS << "SDNode *Select_EH_LABEL(const SDValue &N) {\n"
<< " SDValue Chain = N.getOperand(0);\n"
<< " unsigned C = cast<LabelSDNode>(N)->getLabelID();\n"
<< " SDValue Tmp = CurDAG->getTargetConstant(C, EVT::i32);\n"
<< " SDValue Tmp = CurDAG->getTargetConstant(C, MVT::i32);\n"
<< " return CurDAG->SelectNodeTo(N.getNode(), TargetInstrInfo::EH_LABEL,\n"
<< " EVT::Other, Tmp, Chain);\n"
<< " MVT::Other, Tmp, Chain);\n"
<< "}\n\n";
OS << "SDNode *Select_DECLARE(const SDValue &N) {\n"
@ -1971,12 +1971,12 @@ void DAGISelEmitter::EmitInstructionSelector(raw_ostream &OS) {
<< " SDValue Tmp2 = "
<< "CurDAG->getTargetGlobalAddress(GV, TLI.getPointerTy());\n"
<< " return CurDAG->SelectNodeTo(N.getNode(), TargetInstrInfo::DECLARE,\n"
<< " EVT::Other, Tmp1, Tmp2, Chain);\n"
<< " MVT::Other, Tmp1, Tmp2, Chain);\n"
<< "}\n\n";
OS << "// The main instruction selector code.\n"
<< "SDNode *SelectCode(SDValue N) {\n"
<< " EVT::SimpleValueType NVT = N.getNode()->getValueType(0).getSimpleVT();\n"
<< " MVT::SimpleValueType NVT = N.getNode()->getValueType(0).getSimpleVT().SimpleTy;\n"
<< " switch (N.getOpcode()) {\n"
<< " default:\n"
<< " assert(!N.isMachineOpcode() && \"Node already selected!\");\n"
@ -2049,7 +2049,7 @@ void DAGISelEmitter::EmitInstructionSelector(raw_ostream &OS) {
HasPtrPattern = true;
continue;
}
OS << " case EVT::" << VTStr.substr(1) << ":\n"
OS << " case MVT::" << VTStr.substr(1) << ":\n"
<< " return Select_" << getLegalCName(OpName)
<< VTStr << "(N);\n";
}
@ -2091,7 +2091,7 @@ void DAGISelEmitter::EmitInstructionSelector(raw_ostream &OS) {
OS << "void CannotYetSelectIntrinsic(SDValue N) DISABLE_INLINE {\n"
<< " cerr << \"Cannot yet select: \";\n"
<< " unsigned iid = cast<ConstantSDNode>(N.getOperand("
<< "N.getOperand(0).getValueType() == EVT::Other))->getZExtValue();\n"
<< "N.getOperand(0).getValueType() == MVT::Other))->getZExtValue();\n"
<< " llvm_report_error(\"Cannot yet select: intrinsic %\" +\n"
<< "Intrinsic::getName((Intrinsic::ID)iid));\n"
<< "}\n\n";

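Most of the emitter changes above only affect strings written into the generated selector, so the shape the generated code takes afterwards is the useful thing to see. A hedged reconstruction assembled from the OS << fragments above; the opcode, type, and mangled function names are illustrative, not actual emitter output, and in the real output these are members of the target's DAG ISel class:

// Sketch only: generated SelectCode entry point and per-opcode/type dispatch.
SDNode *Select_ISD_ADD_i32(SDValue N);   // illustrative per-pattern function
SDNode *SelectCode(SDValue N) {
  MVT::SimpleValueType NVT =
      N.getNode()->getValueType(0).getSimpleVT().SimpleTy;
  switch (N.getOpcode()) {
  default: break;
  case ISD::ADD:
    switch (NVT) {
    case MVT::i32: return Select_ISD_ADD_i32(N);   // name mangling illustrative
    default: break;
    }
    break;
  }
  // The real generated code falls through to CannotYetSelect-style handlers here.
  return 0;
}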

@ -53,7 +53,7 @@ struct OperandsSignature {
///
bool initialize(TreePatternNode *InstPatNode,
const CodeGenTarget &Target,
EVT::SimpleValueType VT) {
MVT::SimpleValueType VT) {
if (!InstPatNode->isLeaf() &&
InstPatNode->getOperator()->getName() == "imm") {
Operands.push_back("i");
@ -203,8 +203,8 @@ struct OperandsSignature {
class FastISelMap {
typedef std::map<std::string, InstructionMemo> PredMap;
typedef std::map<EVT::SimpleValueType, PredMap> RetPredMap;
typedef std::map<EVT::SimpleValueType, RetPredMap> TypeRetPredMap;
typedef std::map<MVT::SimpleValueType, PredMap> RetPredMap;
typedef std::map<MVT::SimpleValueType, RetPredMap> TypeRetPredMap;
typedef std::map<std::string, TypeRetPredMap> OpcodeTypeRetPredMap;
typedef std::map<OperandsSignature, OpcodeTypeRetPredMap> OperandsOpcodeTypeRetPredMap;
@ -297,8 +297,8 @@ void FastISelMap::CollectPatterns(CodeGenDAGPatterns &CGP) {
Record *InstPatOp = InstPatNode->getOperator();
std::string OpcodeName = getOpcodeName(InstPatOp, CGP);
EVT::SimpleValueType RetVT = InstPatNode->getTypeNum(0);
EVT::SimpleValueType VT = RetVT;
MVT::SimpleValueType RetVT = InstPatNode->getTypeNum(0);
MVT::SimpleValueType VT = RetVT;
if (InstPatNode->getNumChildren())
VT = InstPatNode->getChild(0)->getTypeNum(0);
@ -385,12 +385,12 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
// Emit one function for each opcode,type pair.
for (TypeRetPredMap::const_iterator TI = TM.begin(), TE = TM.end();
TI != TE; ++TI) {
EVT::SimpleValueType VT = TI->first;
MVT::SimpleValueType VT = TI->first;
const RetPredMap &RM = TI->second;
if (RM.size() != 1) {
for (RetPredMap::const_iterator RI = RM.begin(), RE = RM.end();
RI != RE; ++RI) {
EVT::SimpleValueType RetVT = RI->first;
MVT::SimpleValueType RetVT = RI->first;
const PredMap &PM = RI->second;
bool HasPred = false;
@ -461,14 +461,14 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
<< getLegalCName(Opcode) << "_"
<< getLegalCName(getName(VT)) << "_";
Operands.PrintManglingSuffix(OS);
OS << "(EVT::SimpleValueType RetVT";
OS << "(MVT RetVT";
if (!Operands.empty())
OS << ", ";
Operands.PrintParameters(OS);
OS << ") {\nswitch (RetVT) {\n";
OS << ") {\nswitch (RetVT.SimpleTy) {\n";
for (RetPredMap::const_iterator RI = RM.begin(), RE = RM.end();
RI != RE; ++RI) {
EVT::SimpleValueType RetVT = RI->first;
MVT::SimpleValueType RetVT = RI->first;
OS << " case " << getName(RetVT) << ": return FastEmit_"
<< getLegalCName(Opcode) << "_" << getLegalCName(getName(VT))
<< "_" << getLegalCName(getName(RetVT)) << "_";
@ -485,13 +485,13 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
<< getLegalCName(Opcode) << "_"
<< getLegalCName(getName(VT)) << "_";
Operands.PrintManglingSuffix(OS);
OS << "(EVT::SimpleValueType RetVT";
OS << "(MVT RetVT";
if (!Operands.empty())
OS << ", ";
Operands.PrintParameters(OS);
OS << ") {\n";
OS << " if (RetVT != " << getName(RM.begin()->first)
OS << " if (RetVT.SimpleTy != " << getName(RM.begin()->first)
<< ")\n return 0;\n";
const PredMap &PM = RM.begin()->second;
@ -555,15 +555,15 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
OS << "unsigned FastEmit_"
<< getLegalCName(Opcode) << "_";
Operands.PrintManglingSuffix(OS);
OS << "(EVT::SimpleValueType VT, EVT::SimpleValueType RetVT";
OS << "(MVT VT, MVT RetVT";
if (!Operands.empty())
OS << ", ";
Operands.PrintParameters(OS);
OS << ") {\n";
OS << " switch (VT) {\n";
OS << " switch (VT.SimpleTy) {\n";
for (TypeRetPredMap::const_iterator TI = TM.begin(), TE = TM.end();
TI != TE; ++TI) {
EVT::SimpleValueType VT = TI->first;
MVT::SimpleValueType VT = TI->first;
std::string TypeName = getName(VT);
OS << " case " << TypeName << ": return FastEmit_"
<< getLegalCName(Opcode) << "_" << getLegalCName(TypeName) << "_";
@ -587,7 +587,7 @@ void FastISelMap::PrintFunctionDefinitions(raw_ostream &OS) {
// on opcode and type.
OS << "unsigned FastEmit_";
Operands.PrintManglingSuffix(OS);
OS << "(EVT::SimpleValueType VT, EVT::SimpleValueType RetVT, ISD::NodeType Opcode";
OS << "(MVT VT, MVT RetVT, ISD::NodeType Opcode";
if (!Operands.empty())
OS << ", ";
Operands.PrintParameters(OS);

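Likewise for FastISel: the generated helpers now take an MVT by value and dispatch on its SimpleTy field, as the printed fragments above spell out. A hedged sketch of one generated dispatcher chain; the opcode, mangled names, and the instruction-emission body are illustrative:

// Sketch only: generated FastISel dispatchers after the signature change.
unsigned FastEmit_ISD_ADD_i32_r(MVT RetVT, unsigned Op0) {
  if (RetVT.SimpleTy != MVT::i32)
    return 0;                      // single supported return type for this node
  // ... select and emit the matching target instruction here ...
  return 0;
}

unsigned FastEmit_ISD_ADD_r(MVT VT, MVT RetVT, unsigned Op0) {
  switch (VT.SimpleTy) {
  case MVT::i32: return FastEmit_ISD_ADD_i32_r(RetVT, Op0);
  default:       return 0;
  }
}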

@ -140,26 +140,26 @@ EmitIntrinsicToOverloadTable(const std::vector<CodeGenIntrinsic> &Ints,
OS << "#endif\n\n";
}
static void EmitTypeForValueType(raw_ostream &OS, EVT::SimpleValueType VT) {
static void EmitTypeForValueType(raw_ostream &OS, MVT::SimpleValueType VT) {
if (EVT(VT).isInteger()) {
unsigned BitWidth = EVT(VT).getSizeInBits();
OS << "IntegerType::get(" << BitWidth << ")";
} else if (VT == EVT::Other) {
// EVT::OtherVT is used to mean the empty struct type here.
} else if (VT == MVT::Other) {
// MVT::OtherVT is used to mean the empty struct type here.
OS << "StructType::get(Context)";
} else if (VT == EVT::f32) {
} else if (VT == MVT::f32) {
OS << "Type::FloatTy";
} else if (VT == EVT::f64) {
} else if (VT == MVT::f64) {
OS << "Type::DoubleTy";
} else if (VT == EVT::f80) {
} else if (VT == MVT::f80) {
OS << "Type::X86_FP80Ty";
} else if (VT == EVT::f128) {
} else if (VT == MVT::f128) {
OS << "Type::FP128Ty";
} else if (VT == EVT::ppcf128) {
} else if (VT == MVT::ppcf128) {
OS << "Type::PPC_FP128Ty";
} else if (VT == EVT::isVoid) {
} else if (VT == MVT::isVoid) {
OS << "Type::VoidTy";
} else if (VT == EVT::Metadata) {
} else if (VT == MVT::Metadata) {
OS << "Type::MetadataTy";
} else {
assert(false && "Unsupported ValueType!");
@ -190,7 +190,7 @@ static void EmitTypeGenerate(raw_ostream &OS,
static void EmitTypeGenerate(raw_ostream &OS, const Record *ArgType,
unsigned &ArgNo) {
EVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
MVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
if (ArgType->isSubClassOf("LLVMMatchType")) {
unsigned Number = ArgType->getValueAsInt("Number");
@ -203,7 +203,7 @@ static void EmitTypeGenerate(raw_ostream &OS, const Record *ArgType,
<< "(dyn_cast<VectorType>(Tys[" << Number << "]))";
else
OS << "Tys[" << Number << "]";
} else if (VT == EVT::iAny || VT == EVT::fAny || VT == EVT::vAny) {
} else if (VT == MVT::iAny || VT == MVT::fAny || VT == MVT::vAny) {
// NOTE: The ArgNo variable here is not the absolute argument number, it is
// the index of the "arbitrary" type in the Tys array passed to the
// Intrinsic::getDeclaration function. Consequently, we only want to
@ -213,13 +213,13 @@ static void EmitTypeGenerate(raw_ostream &OS, const Record *ArgType,
} else if (EVT(VT).isVector()) {
EVT VVT = VT;
OS << "VectorType::get(";
EmitTypeForValueType(OS, VVT.getVectorElementType().getSimpleVT());
EmitTypeForValueType(OS, VVT.getVectorElementType().getSimpleVT().SimpleTy);
OS << ", " << VVT.getVectorNumElements() << ")";
} else if (VT == EVT::iPTR) {
} else if (VT == MVT::iPTR) {
OS << "PointerType::getUnqual(";
EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
OS << ")";
} else if (VT == EVT::iPTRAny) {
} else if (VT == MVT::iPTRAny) {
// Make sure the user has passed us an argument type to overload. If not,
// treat it as an ordinary (not overloaded) intrinsic.
OS << "(" << ArgNo << " < numTys) ? Tys[" << ArgNo
@ -227,11 +227,11 @@ static void EmitTypeGenerate(raw_ostream &OS, const Record *ArgType,
EmitTypeGenerate(OS, ArgType->getValueAsDef("ElTy"), ArgNo);
OS << ")";
++ArgNo;
} else if (VT == EVT::isVoid) {
} else if (VT == MVT::isVoid) {
if (ArgNo == 0)
OS << "Type::VoidTy";
else
// EVT::isVoid is used to mean varargs here.
// MVT::isVoid is used to mean varargs here.
OS << "...";
} else {
EmitTypeForValueType(OS, VT);
@ -326,13 +326,13 @@ void IntrinsicEmitter::EmitVerifier(const std::vector<CodeGenIntrinsic> &Ints,
else
OS << "~" << Number;
} else {
EVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
MVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
OS << getEnumName(VT);
if (EVT(VT).isOverloaded())
OverloadedTypeIndices.push_back(j);
if (VT == EVT::isVoid && j != 0 && j != je - 1)
if (VT == MVT::isVoid && j != 0 && j != je - 1)
throw "Var arg type not last argument";
}
}
@ -354,13 +354,13 @@ void IntrinsicEmitter::EmitVerifier(const std::vector<CodeGenIntrinsic> &Ints,
else
OS << "~" << Number;
} else {
EVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
MVT::SimpleValueType VT = getValueType(ArgType->getValueAsDef("VT"));
OS << getEnumName(VT);
if (EVT(VT).isOverloaded())
OverloadedTypeIndices.push_back(j + RetTys.size());
if (VT == EVT::isVoid && j != 0 && j != je - 1)
if (VT == MVT::isVoid && j != 0 && j != je - 1)
throw "Var arg type not last argument";
}
}
@ -405,7 +405,7 @@ void IntrinsicEmitter::EmitGenerator(const std::vector<CodeGenIntrinsic> &Ints,
unsigned N = ParamTys.size();
if (N > 1 &&
getValueType(ParamTys[N - 1]->getValueAsDef("VT")) == EVT::isVoid) {
getValueType(ParamTys[N - 1]->getValueAsDef("VT")) == MVT::isVoid) {
OS << " IsVarArg = true;\n";
--N;
}


@ -226,7 +226,7 @@ void RegisterInfoEmitter::run(raw_ostream &OS) {
<< "[] = {\n ";
for (unsigned i = 0, e = RC.VTs.size(); i != e; ++i)
OS << getEnumName(RC.VTs[i]) << ", ";
OS << "EVT::Other\n };\n\n";
OS << "MVT::Other\n };\n\n";
}
OS << "} // end anonymous namespace\n\n";