getScalarType().getSizeInBits() -> getScalarSizeInBits() ; NFCI
llvm-svn: 281489
commit bd6fca1419
parent fa5f767a38
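For context: this rename is purely mechanical, because getScalarSizeInBits() is a convenience accessor that folds the two-step getScalarType().getSizeInBits() call into one. A minimal, self-contained C++ sketch of that equivalence follows; MockVT is a hypothetical stand-in for illustration, not the real llvm::EVT header.

#include <cassert>

// MockVT is a hypothetical stand-in for a value type such as llvm::EVT;
// it only models the pieces needed to show why the two spellings agree.
struct MockVT {
  unsigned ElemBits; // bit width of one scalar element
  unsigned NumElems; // number of elements (1 for a scalar type)

  MockVT getScalarType() const { return {ElemBits, 1}; }
  unsigned getSizeInBits() const { return ElemBits * NumElems; }

  // The convenience accessor: the two-step call folded into one.
  unsigned getScalarSizeInBits() const { return getScalarType().getSizeInBits(); }
};

int main() {
  MockVT V4i32{32, 4}; // plays the role of a v4i32-like vector type
  assert(V4i32.getScalarType().getSizeInBits() == V4i32.getScalarSizeInBits());
  return 0;
}

Because both spellings reduce to the same computation, every hunk below is a textual substitution with no change in behavior (hence the NFCI tag).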
@@ -181,7 +181,7 @@ namespace {
 /// if things it uses can be simplified by bit propagation.
 /// If so, return true.
 bool SimplifyDemandedBits(SDValue Op) {
-unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
+unsigned BitWidth = Op.getValueType().getScalarSizeInBits();
 APInt Demanded = APInt::getAllOnesValue(BitWidth);
 return SimplifyDemandedBits(Op, Demanded);
 }
@@ -1754,7 +1754,7 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
 if (N1.getOpcode() == ISD::AND) {
 SDValue AndOp0 = N1.getOperand(0);
 unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0);
-unsigned DestBits = VT.getScalarType().getSizeInBits();
+unsigned DestBits = VT.getScalarSizeInBits();

 // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x))
 // and similar xforms where the inner op is either ~0 or 0.
@@ -2058,7 +2058,7 @@ SDValue DAGCombiner::visitMUL(SDNode *N) {
 // We require a splat of the entire scalar bit width for non-contiguous
 // bit patterns.
 bool IsFullSplat =
-ConstValue1.getBitWidth() == VT.getScalarType().getSizeInBits();
+ConstValue1.getBitWidth() == VT.getScalarSizeInBits();
 // fold (mul x, 1) -> x
 if (N1IsConst && ConstValue1 == 1 && IsFullSplat)
 return N0;
@@ -3082,13 +3082,13 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
 // do not return N0, because undef node may exist in N0
 return DAG.getConstant(
 APInt::getNullValue(
-N0.getValueType().getScalarType().getSizeInBits()),
+N0.getValueType().getScalarSizeInBits()),
 SDLoc(N), N0.getValueType());
 if (ISD::isBuildVectorAllZeros(N1.getNode()))
 // do not return N1, because undef node may exist in N1
 return DAG.getConstant(
 APInt::getNullValue(
-N1.getValueType().getScalarType().getSizeInBits()),
+N1.getValueType().getScalarSizeInBits()),
 SDLoc(N), N1.getValueType());

 // fold (and x, -1) -> x, vector edition
@@ -3111,7 +3111,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
 if (isAllOnesConstant(N1))
 return N0;
 // if (and x, c) is known to be zero, return 0
-unsigned BitWidth = VT.getScalarType().getSizeInBits();
+unsigned BitWidth = VT.getScalarSizeInBits();
 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0),
 APInt::getAllOnesValue(BitWidth)))
 return DAG.getConstant(0, SDLoc(N), VT);
@@ -3178,7 +3178,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
 // that will apply equally to all members of the vector, so AND all the
 // lanes of the constant together.
 EVT VT = Vector->getValueType(0);
-unsigned BitWidth = VT.getScalarType().getSizeInBits();
+unsigned BitWidth = VT.getScalarSizeInBits();

 // If the splat value has been compressed to a bitlength lower
 // than the size of the vector lane, we need to re-expand it to
@@ -3210,7 +3210,7 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
 // extension. If it is still the AllOnesValue then this AND is completely
 // unneeded.
 Constant =
-Constant.zextOrTrunc(Load->getMemoryVT().getScalarType().getSizeInBits());
+Constant.zextOrTrunc(Load->getMemoryVT().getScalarSizeInBits());

 bool B;
 switch (Load->getExtensionType()) {
@@ -3327,9 +3327,9 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
 EVT MemVT = LN0->getMemoryVT();
 // If we zero all the possible extended bits, then we can turn this into
 // a zextload if we are running before legalize or the operation is legal.
-unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits();
+unsigned BitWidth = N1.getValueType().getScalarSizeInBits();
 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
-BitWidth - MemVT.getScalarType().getSizeInBits())) &&
+BitWidth - MemVT.getScalarSizeInBits())) &&
 ((!LegalOperations && !LN0->isVolatile()) ||
 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) {
 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
@@ -3347,9 +3347,9 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
 EVT MemVT = LN0->getMemoryVT();
 // If we zero all the possible extended bits, then we can turn this into
 // a zextload if we are running before legalize or the operation is legal.
-unsigned BitWidth = N1.getValueType().getScalarType().getSizeInBits();
+unsigned BitWidth = N1.getValueType().getScalarSizeInBits();
 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth,
-BitWidth - MemVT.getScalarType().getSizeInBits())) &&
+BitWidth - MemVT.getScalarSizeInBits())) &&
 ((!LegalOperations && !LN0->isVolatile()) ||
 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) {
 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT,
@@ -3752,13 +3752,13 @@ SDValue DAGCombiner::visitOR(SDNode *N) {
 // do not return N0, because undef node may exist in N0
 return DAG.getConstant(
 APInt::getAllOnesValue(
-N0.getValueType().getScalarType().getSizeInBits()),
+N0.getValueType().getScalarSizeInBits()),
 SDLoc(N), N0.getValueType());
 if (ISD::isBuildVectorAllOnes(N1.getNode()))
 // do not return N1, because undef node may exist in N1
 return DAG.getConstant(
 APInt::getAllOnesValue(
-N1.getValueType().getScalarType().getSizeInBits()),
+N1.getValueType().getScalarSizeInBits()),
 SDLoc(N), N1.getValueType());

 // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask)
@@ -4650,7 +4650,7 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
 SDValue N0 = N->getOperand(0);
 SDValue N1 = N->getOperand(1);
 EVT VT = N0.getValueType();
-unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
+unsigned OpSizeInBits = VT.getScalarSizeInBits();

 // fold vector ops
 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
@@ -4802,7 +4802,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
 SDValue N0 = N->getOperand(0);
 SDValue N1 = N->getOperand(1);
 EVT VT = N0.getValueType();
-unsigned OpSizeInBits = VT.getScalarType().getSizeInBits();
+unsigned OpSizeInBits = VT.getScalarSizeInBits();

 // fold vector ops
 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
@@ -4858,7 +4858,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
 uint64_t c2 = N1C->getZExtValue();
 EVT InnerShiftVT = N0.getOperand(0).getValueType();
 EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType();
-uint64_t InnerShiftSize = InnerShiftVT.getScalarType().getSizeInBits();
+uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits();
 // This is only valid if the OpSizeInBits + c1 = size of inner shift.
 if (c1 + OpSizeInBits == InnerShiftSize) {
 SDLoc DL(N0);
@@ -5712,7 +5712,7 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
 EVT VT = LHS.getValueType();
 SDValue Shift = DAG.getNode(
 ISD::SRA, DL, VT, LHS,
-DAG.getConstant(VT.getScalarType().getSizeInBits() - 1, DL, VT));
+DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift);
 AddToWorklist(Shift.getNode());
 AddToWorklist(Add.getNode());
@@ -5867,7 +5867,7 @@ static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI,

 // We can fold this node into a build_vector.
 unsigned VTBits = SVT.getSizeInBits();
-unsigned EVTBits = N0->getValueType(0).getScalarType().getSizeInBits();
+unsigned EVTBits = N0->getValueType(0).getScalarSizeInBits();
 SmallVector<SDValue, 8> Elts;
 unsigned NumElts = VT.getVectorNumElements();
 SDLoc DL(N);
@@ -6102,9 +6102,9 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
 // See if the value being truncated is already sign extended. If so, just
 // eliminate the trunc/sext pair.
 SDValue Op = N0.getOperand(0);
-unsigned OpBits = Op.getValueType().getScalarType().getSizeInBits();
-unsigned MidBits = N0.getValueType().getScalarType().getSizeInBits();
-unsigned DestBits = VT.getScalarType().getSizeInBits();
+unsigned OpBits = Op.getValueType().getScalarSizeInBits();
+unsigned MidBits = N0.getValueType().getScalarSizeInBits();
+unsigned DestBits = VT.getScalarSizeInBits();
 unsigned NumSignBits = DAG.ComputeNumSignBits(Op);

 if (OpBits == DestBits) {
@@ -6592,7 +6592,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
 // elements we can use a matching integer vector type and then
 // truncate/sign extend.
 EVT MatchingElementType = EVT::getIntegerVT(
-*DAG.getContext(), N00VT.getScalarType().getSizeInBits());
+*DAG.getContext(), N00VT.getScalarSizeInBits());
 EVT MatchingVectorType = EVT::getVectorVT(
 *DAG.getContext(), MatchingElementType, N00VT.getVectorNumElements());
 SDValue VsetCC =
@@ -7016,8 +7016,8 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
 SDValue N1 = N->getOperand(1);
 EVT VT = N->getValueType(0);
 EVT EVT = cast<VTSDNode>(N1)->getVT();
-unsigned VTBits = VT.getScalarType().getSizeInBits();
-unsigned EVTBits = EVT.getScalarType().getSizeInBits();
+unsigned VTBits = VT.getScalarSizeInBits();
+unsigned EVTBits = EVT.getScalarSizeInBits();

 if (N0.isUndef())
 return DAG.getUNDEF(VT);
@@ -7041,7 +7041,7 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
 // if x is small enough.
 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) {
 SDValue N00 = N0.getOperand(0);
-if (N00.getValueType().getScalarType().getSizeInBits() <= EVTBits &&
+if (N00.getValueType().getScalarSizeInBits() <= EVTBits &&
 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1);
 }
@@ -12162,8 +12162,8 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
 SDValue Shorter =
 GetDemandedBits(Value,
 APInt::getLowBitsSet(
-Value.getValueType().getScalarType().getSizeInBits(),
-ST->getMemoryVT().getScalarType().getSizeInBits()));
+Value.getValueType().getScalarSizeInBits(),
+ST->getMemoryVT().getScalarSizeInBits()));
 AddToWorklist(Value.getNode());
 if (Shorter.getNode())
 return DAG.getTruncStore(Chain, SDLoc(N), Shorter,
@@ -12173,8 +12173,8 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
 // SimplifyDemandedBits, which only works if the value has a single use.
 if (SimplifyDemandedBits(Value,
 APInt::getLowBitsSet(
-Value.getValueType().getScalarType().getSizeInBits(),
-ST->getMemoryVT().getScalarType().getSizeInBits())))
+Value.getValueType().getScalarSizeInBits(),
+ST->getMemoryVT().getScalarSizeInBits())))
 return SDValue(N, 0);
 }

@@ -13459,8 +13459,8 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
 // Into:
 // indices are equal or bit offsets are equal => V1
 // otherwise => (extract_subvec V1, ExtIdx)
-if (InsIdx->getZExtValue() * SmallVT.getScalarType().getSizeInBits() ==
-ExtIdx->getZExtValue() * NVT.getScalarType().getSizeInBits())
+if (InsIdx->getZExtValue() * SmallVT.getScalarSizeInBits() ==
+ExtIdx->getZExtValue() * NVT.getScalarSizeInBits())
 return DAG.getBitcast(NVT, V->getOperand(1));
 return DAG.getNode(
 ISD::EXTRACT_SUBVECTOR, dl, NVT,
@@ -2942,8 +2942,8 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
 EVT ShiftAmountTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
 if (VT.isVector())
 ShiftAmountTy = VT;
-unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
-ExtraVT.getScalarType().getSizeInBits();
+unsigned BitsDiff = VT.getScalarSizeInBits() -
+ExtraVT.getScalarSizeInBits();
 SDValue ShiftCst = DAG.getConstant(BitsDiff, dl, ShiftAmountTy);
 Tmp1 = DAG.getNode(ISD::SHL, dl, Node->getValueType(0),
 Node->getOperand(0), ShiftCst);
@@ -1518,8 +1518,8 @@ ExpandShiftWithKnownAmountBit(SDNode *N, SDValue &Lo, SDValue &Hi) {
 SDValue Amt = N->getOperand(1);
 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
 EVT ShTy = Amt.getValueType();
-unsigned ShBits = ShTy.getScalarType().getSizeInBits();
-unsigned NVTBits = NVT.getScalarType().getSizeInBits();
+unsigned ShBits = ShTy.getScalarSizeInBits();
+unsigned NVTBits = NVT.getScalarSizeInBits();
 assert(isPowerOf2_32(NVTBits) &&
 "Expanded integer type size not a power of two!");
 SDLoc dl(N);
@@ -2364,8 +2364,8 @@ void DAGTypeLegalizer::ExpandIntRes_Shift(SDNode *N,
 // the new SHL_PARTS operation would need further legalization.
 SDValue ShiftOp = N->getOperand(1);
 EVT ShiftTy = TLI.getShiftAmountTy(VT, DAG.getDataLayout());
-assert(ShiftTy.getScalarType().getSizeInBits() >=
-Log2_32_Ceil(VT.getScalarType().getSizeInBits()) &&
+assert(ShiftTy.getScalarSizeInBits() >=
+Log2_32_Ceil(VT.getScalarSizeInBits()) &&
 "ShiftAmountTy is too small to cover the range of this type!");
 if (ShiftOp.getValueType() != ShiftTy)
 ShiftOp = DAG.getZExtOrTrunc(ShiftOp, dl, ShiftTy);
@@ -770,8 +770,8 @@ SDValue VectorLegalizer::ExpandSEXTINREG(SDValue Op) {
 SDLoc DL(Op);
 EVT OrigTy = cast<VTSDNode>(Op->getOperand(1))->getVT();

-unsigned BW = VT.getScalarType().getSizeInBits();
-unsigned OrigBW = OrigTy.getScalarType().getSizeInBits();
+unsigned BW = VT.getScalarSizeInBits();
+unsigned OrigBW = OrigTy.getScalarSizeInBits();
 SDValue ShiftSz = DAG.getConstant(BW - OrigBW, DL, VT);

 Op = Op.getOperand(0);
@@ -961,7 +961,7 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
 Op2 = DAG.getNode(ISD::BITCAST, DL, VT, Op2);

 SDValue AllOnes = DAG.getConstant(
-APInt::getAllOnesValue(VT.getScalarType().getSizeInBits()), DL, VT);
+APInt::getAllOnesValue(VT.getScalarSizeInBits()), DL, VT);
 SDValue NotMask = DAG.getNode(ISD::XOR, DL, VT, Mask, AllOnes);

 Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
@@ -1012,7 +1012,7 @@ SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
 "getZeroExtendInReg should use the vector element type instead of "
 "the vector type!");
 if (Op.getValueType() == VT) return Op;
-unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
+unsigned BitWidth = Op.getValueType().getScalarSizeInBits();
 APInt Imm = APInt::getLowBitsSet(BitWidth,
 VT.getSizeInBits());
 return getNode(ISD::AND, DL, Op.getValueType(), Op,
@@ -1984,7 +1984,7 @@ bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
 if (Op.getValueType().isVector())
 return false;

-unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
+unsigned BitWidth = Op.getValueType().getScalarSizeInBits();
 return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
 }

@@ -2002,7 +2002,7 @@ bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
 /// them in the KnownZero/KnownOne bitsets.
 void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
 APInt &KnownOne, unsigned Depth) const {
-unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
+unsigned BitWidth = Op.getValueType().getScalarSizeInBits();

 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
 if (Depth == 6)
@@ -2207,7 +2207,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
 break;
 case ISD::SIGN_EXTEND_INREG: {
 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
-unsigned EBits = EVT.getScalarType().getSizeInBits();
+unsigned EBits = EVT.getScalarSizeInBits();

 // Sign extension. Compute the demanded bits in the result that are not
 // present in the input.
@@ -2255,7 +2255,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
 // If this is a ZEXTLoad and we are looking at the loaded value.
 if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
 EVT VT = LD->getMemoryVT();
-unsigned MemBits = VT.getScalarType().getSizeInBits();
+unsigned MemBits = VT.getScalarSizeInBits();
 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
 } else if (const MDNode *Ranges = LD->getRanges()) {
 if (LD->getExtensionType() == ISD::NON_EXTLOAD)
@@ -2265,7 +2265,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
 }
 case ISD::ZERO_EXTEND: {
 EVT InVT = Op.getOperand(0).getValueType();
-unsigned InBits = InVT.getScalarType().getSizeInBits();
+unsigned InBits = InVT.getScalarSizeInBits();
 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
 KnownZero = KnownZero.trunc(InBits);
 KnownOne = KnownOne.trunc(InBits);
@@ -2277,7 +2277,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
 }
 case ISD::SIGN_EXTEND: {
 EVT InVT = Op.getOperand(0).getValueType();
-unsigned InBits = InVT.getScalarType().getSizeInBits();
+unsigned InBits = InVT.getScalarSizeInBits();
 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);

 KnownZero = KnownZero.trunc(InBits);
@@ -2300,7 +2300,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
 }
 case ISD::ANY_EXTEND: {
 EVT InVT = Op.getOperand(0).getValueType();
-unsigned InBits = InVT.getScalarType().getSizeInBits();
+unsigned InBits = InVT.getScalarSizeInBits();
 KnownZero = KnownZero.trunc(InBits);
 KnownOne = KnownOne.trunc(InBits);
 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
@@ -2310,7 +2310,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
 }
 case ISD::TRUNCATE: {
 EVT InVT = Op.getOperand(0).getValueType();
-unsigned InBits = InVT.getScalarType().getSizeInBits();
+unsigned InBits = InVT.getScalarSizeInBits();
 KnownZero = KnownZero.zext(InBits);
 KnownOne = KnownOne.zext(InBits);
 computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
@@ -2517,7 +2517,7 @@ bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {

 // Fall back to computeKnownBits to catch other known cases.
 EVT OpVT = Val.getValueType();
-unsigned BitWidth = OpVT.getScalarType().getSizeInBits();
+unsigned BitWidth = OpVT.getScalarSizeInBits();
 APInt KnownZero, KnownOne;
 computeKnownBits(Val, KnownZero, KnownOne);
 return (KnownZero.countPopulation() == BitWidth - 1) &&
@@ -2527,7 +2527,7 @@ bool SelectionDAG::isKnownToBeAPowerOfTwo(SDValue Val) const {
 unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
 EVT VT = Op.getValueType();
 assert(VT.isInteger() && "Invalid VT!");
-unsigned VTBits = VT.getScalarType().getSizeInBits();
+unsigned VTBits = VT.getScalarSizeInBits();
 unsigned Tmp, Tmp2;
 unsigned FirstAnswer = 1;

@@ -2550,13 +2550,13 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {

 case ISD::SIGN_EXTEND:
 Tmp =
-VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
+VTBits-Op.getOperand(0).getValueType().getScalarSizeInBits();
 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;

 case ISD::SIGN_EXTEND_INREG:
 // Max of the input and what this extends.
 Tmp =
-cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
+cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarSizeInBits();
 Tmp = VTBits-Tmp+1;

 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
@@ -2732,10 +2732,10 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const {
 switch (ExtType) {
 default: break;
 case ISD::SEXTLOAD: // '17' bits known
-Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
+Tmp = LD->getMemoryVT().getScalarSizeInBits();
 return VTBits-Tmp+1;
 case ISD::ZEXTLOAD: // '16' bits known
-Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
+Tmp = LD->getMemoryVT().getScalarSizeInBits();
 return VTBits-Tmp;
 }
 }
@@ -3642,7 +3642,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
 if (EVT == VT) return N1; // Not actually extending

 auto SignExtendInReg = [&](APInt Val) {
-unsigned FromBits = EVT.getScalarType().getSizeInBits();
+unsigned FromBits = EVT.getScalarSizeInBits();
 Val <<= Val.getBitWidth() - FromBits;
 Val = Val.ashr(Val.getBitWidth() - FromBits);
 return getConstant(Val, DL, VT.getScalarType());
@@ -4080,7 +4080,7 @@ static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
 const SDLoc &dl) {
 assert(!Value.isUndef());

-unsigned NumBits = VT.getScalarType().getSizeInBits();
+unsigned NumBits = VT.getScalarSizeInBits();
 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
 assert(C->getAPIntValue().getBitWidth() == 8);
 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
@@ -5549,7 +5549,7 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
 // If the and is only masking out bits that cannot effect the shift,
 // eliminate the and.
-unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
+unsigned NumBits = VT.getScalarSizeInBits()*2;
 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
 }
@@ -432,7 +432,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 TargetLoweringOpt &TLO,
 unsigned Depth) const {
 unsigned BitWidth = DemandedMask.getBitWidth();
-assert(Op.getValueType().getScalarType().getSizeInBits() == BitWidth &&
+assert(Op.getValueType().getScalarSizeInBits() == BitWidth &&
 "Mask size mismatches value type size!");
 APInt NewMask = DemandedMask;
 SDLoc dl(Op);
@@ -850,7 +850,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 // demand the input sign bit.
 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
 if (HighBits.intersects(NewMask))
-InDemandedMask |= APInt::getSignBit(VT.getScalarType().getSizeInBits());
+InDemandedMask |= APInt::getSignBit(VT.getScalarSizeInBits());

 if (SimplifyDemandedBits(Op.getOperand(0), InDemandedMask,
 KnownZero, KnownOne, TLO, Depth+1))
@@ -893,9 +893,9 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 APInt MsbMask = APInt::getHighBitsSet(BitWidth, 1);
 // If we only care about the highest bit, don't bother shifting right.
 if (MsbMask == NewMask) {
-unsigned ShAmt = ExVT.getScalarType().getSizeInBits();
+unsigned ShAmt = ExVT.getScalarSizeInBits();
 SDValue InOp = Op.getOperand(0);
-unsigned VTBits = Op->getValueType(0).getScalarType().getSizeInBits();
+unsigned VTBits = Op->getValueType(0).getScalarSizeInBits();
 bool AlreadySignExtended =
 TLO.DAG.ComputeNumSignBits(InOp) >= VTBits-ShAmt+1;
 // However if the input is already sign extended we expect the sign
@@ -919,17 +919,17 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 // present in the input.
 APInt NewBits =
 APInt::getHighBitsSet(BitWidth,
-BitWidth - ExVT.getScalarType().getSizeInBits());
+BitWidth - ExVT.getScalarSizeInBits());

 // If none of the extended bits are demanded, eliminate the sextinreg.
 if ((NewBits & NewMask) == 0)
 return TLO.CombineTo(Op, Op.getOperand(0));

 APInt InSignBit =
-APInt::getSignBit(ExVT.getScalarType().getSizeInBits()).zext(BitWidth);
+APInt::getSignBit(ExVT.getScalarSizeInBits()).zext(BitWidth);
 APInt InputDemandedBits =
 APInt::getLowBitsSet(BitWidth,
-ExVT.getScalarType().getSizeInBits()) &
+ExVT.getScalarSizeInBits()) &
 NewMask;

 // Since the sign extended bits are demanded, we know that the sign
@@ -985,7 +985,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 }
 case ISD::ZERO_EXTEND: {
 unsigned OperandBitWidth =
-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
+Op.getOperand(0).getValueType().getScalarSizeInBits();
 APInt InMask = NewMask.trunc(OperandBitWidth);

 // If none of the top bits are demanded, convert this into an any_extend.
@@ -1007,7 +1007,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 }
 case ISD::SIGN_EXTEND: {
 EVT InVT = Op.getOperand(0).getValueType();
-unsigned InBits = InVT.getScalarType().getSizeInBits();
+unsigned InBits = InVT.getScalarSizeInBits();
 APInt InMask = APInt::getLowBitsSet(BitWidth, InBits);
 APInt InSignBit = APInt::getBitsSet(BitWidth, InBits - 1, InBits);
 APInt NewBits = ~InMask & NewMask;
@@ -1048,7 +1048,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 }
 case ISD::ANY_EXTEND: {
 unsigned OperandBitWidth =
-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
+Op.getOperand(0).getValueType().getScalarSizeInBits();
 APInt InMask = NewMask.trunc(OperandBitWidth);
 if (SimplifyDemandedBits(Op.getOperand(0), InMask,
 KnownZero, KnownOne, TLO, Depth+1))
@@ -1062,7 +1062,7 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 // Simplify the input, using demanded bit information, and compute the known
 // zero/one bits live out.
 unsigned OperandBitWidth =
-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
+Op.getOperand(0).getValueType().getScalarSizeInBits();
 APInt TruncMask = NewMask.zext(OperandBitWidth);
 if (SimplifyDemandedBits(Op.getOperand(0), TruncMask,
 KnownZero, KnownOne, TLO, Depth+1))
@@ -1931,7 +1931,7 @@ static void getUsefulBits(SDValue Op, APInt &UsefulBits, unsigned Depth) {
 return;
 // Initialize UsefulBits
 if (!Depth) {
-unsigned Bitwidth = Op.getValueType().getScalarType().getSizeInBits();
+unsigned Bitwidth = Op.getValueType().getScalarSizeInBits();
 // At the beginning, assume every produced bits is useful
 UsefulBits = APInt(Bitwidth, 0);
 UsefulBits.flipAllBits();
@@ -757,7 +757,7 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode(
 case Intrinsic::aarch64_ldxr: {
 unsigned BitWidth = KnownOne.getBitWidth();
 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
-unsigned MemBits = VT.getScalarType().getSizeInBits();
+unsigned MemBits = VT.getScalarSizeInBits();
 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
 return;
 }
@@ -11987,7 +11987,7 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
 case Intrinsic::arm_ldaex:
 case Intrinsic::arm_ldrex: {
 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
-unsigned MemBits = VT.getScalarType().getSizeInBits();
+unsigned MemBits = VT.getScalarSizeInBits();
 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
 return;
 }
@@ -11026,7 +11026,7 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
 Add->getOperand(1),
 APInt::getAllOnesValue(Bits /* alignment */)
 .zext(
-Add.getValueType().getScalarType().getSizeInBits()))) {
+Add.getValueType().getScalarSizeInBits()))) {
 SDNode *BasePtr = Add->getOperand(0).getNode();
 for (SDNode::use_iterator UI = BasePtr->use_begin(),
 UE = BasePtr->use_end();
@@ -529,7 +529,7 @@ def inserthf : PatFrag<(ops node:$src1, node:$src2),
 // ORs that can be treated as insertions.
 def or_as_inserti8 : PatFrag<(ops node:$src1, node:$src2),
 (or node:$src1, node:$src2), [{
-unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
+unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
 return CurDAG->MaskedValueIsZero(N->getOperand(0),
 APInt::getLowBitsSet(BitWidth, 8));
 }]>;
@@ -537,7 +537,7 @@ def or_as_inserti8 : PatFrag<(ops node:$src1, node:$src2),
 // ORs that can be treated as reversed insertions.
 def or_as_revinserti8 : PatFrag<(ops node:$src1, node:$src2),
 (or node:$src1, node:$src2), [{
-unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
+unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
 return CurDAG->MaskedValueIsZero(N->getOperand(1),
 APInt::getLowBitsSet(BitWidth, 8));
 }]>;
@@ -28786,7 +28786,7 @@ static SDValue combinePCMPAnd1(SDNode *N, SelectionDAG &DAG) {
 // masked compare nodes, so they should not make it here.
 EVT VT0 = Op0.getValueType();
 EVT VT1 = Op1.getValueType();
-unsigned EltBitWidth = VT0.getScalarType().getSizeInBits();
+unsigned EltBitWidth = VT0.getScalarSizeInBits();
 if (VT0 != VT1 || EltBitWidth == 8)
 return SDValue();