[APInt] Use lshrInPlace to replace lshr where possible

This patch uses lshrInPlace to replace code where the object that lshr
is called on is being overwritten with the result. It also adds an
lshrInPlace(const APInt &) overload.

Differential Revision: https://reviews.llvm.org/D32155

llvm-svn: 300566
commit fc947bcfba (parent ec9deb7f54)
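For context, a minimal sketch of the pattern this patch rewrites. It is illustrative only; the function name, variable name, and bit width below are invented, not taken from the patch:

    #include "llvm/ADT/APInt.h"
    using llvm::APInt;

    void demo() {
      APInt Value(128, 42);    // a multi-word APInt; copying one may heap-allocate
      Value = Value.lshr(4);   // old style: lshr materializes a shifted copy, then assigns it back
      Value.lshrInPlace(4);    // new style: shifts the existing object, no temporary APInt
    }

lshr itself remains available for callers that need the shifted value without modifying the original; the patch only converts call sites of the form X = X.lshr(N).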
include/llvm/ADT/APInt.h
@@ -914,7 +914,14 @@ public:
   /// \brief Logical right-shift function.
   ///
   /// Logical right-shift this APInt by shiftAmt.
-  APInt lshr(const APInt &shiftAmt) const;
+  APInt lshr(const APInt &ShiftAmt) const {
+    APInt R(*this);
+    R.lshrInPlace(ShiftAmt);
+    return R;
+  }
+
+  /// Logical right-shift this APInt by ShiftAmt in place.
+  void lshrInPlace(const APInt &ShiftAmt);
 
   /// \brief Left-shift function.
   ///
lib/Analysis/ScalarEvolution.cpp
@@ -1093,7 +1093,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
     APInt Mult(W, i);
     unsigned TwoFactors = Mult.countTrailingZeros();
     T += TwoFactors;
-    Mult = Mult.lshr(TwoFactors);
+    Mult.lshrInPlace(TwoFactors);
     OddFactorial *= Mult;
   }
 
lib/Analysis/ValueTracking.cpp
@@ -661,8 +661,10 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
     computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
     // For those bits in RHS that are known, we can propagate them to known
     // bits in V shifted to the right by C.
-    KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
-    KnownOne  |= RHSKnownOne.lshr(C->getZExtValue());
+    RHSKnownZero.lshrInPlace(C->getZExtValue());
+    KnownZero |= RHSKnownZero;
+    RHSKnownOne.lshrInPlace(C->getZExtValue());
+    KnownOne |= RHSKnownOne;
   // assume(~(v << c) = a)
   } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                  m_Value(A))) &&
@@ -672,8 +674,10 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
     computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
     // For those bits in RHS that are known, we can propagate them inverted
     // to known bits in V shifted to the right by C.
-    KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
-    KnownOne  |= RHSKnownZero.lshr(C->getZExtValue());
+    RHSKnownOne.lshrInPlace(C->getZExtValue());
+    KnownZero |= RHSKnownOne;
+    RHSKnownZero.lshrInPlace(C->getZExtValue());
+    KnownOne |= RHSKnownZero;
   // assume(v >> c = a)
   } else if (match(Arg,
                    m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
@@ -1111,10 +1115,11 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
   }
   case Instruction::LShr: {
     // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
-    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
-      return KnownZero.lshr(ShiftAmt) |
-             // High bits known zero.
-             APInt::getHighBitsSet(BitWidth, ShiftAmt);
+    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
+      APInt KZResult = KnownZero.lshr(ShiftAmt);
+      // High bits known zero.
+      KZResult.setHighBits(ShiftAmt);
+      return KZResult;
     };
 
     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2246,7 +2246,7 @@ static void emitGlobalConstantLargeInt(const ConstantInt *CI, AsmPrinter &AP) {
       // chu[nk1 chu][nk2 chu] ... [nkN-1 chunkN]
       ExtraBits = Realigned.getRawData()[0] &
         (((uint64_t)-1) >> (64 - ExtraBitsSize));
-      Realigned = Realigned.lshr(ExtraBitsSize);
+      Realigned.lshrInPlace(ExtraBitsSize);
     } else
       ExtraBits = Realigned.getRawData()[BitWidth / 64];
   }
lib/CodeGen/CodeGenPrepare.cpp
@@ -5065,16 +5065,14 @@ bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
       if (!ShlC)
         return false;
       uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
-      auto ShlDemandBits = APInt::getAllOnesValue(BitWidth).lshr(ShiftAmt);
-      DemandBits |= ShlDemandBits;
+      DemandBits.setLowBits(BitWidth - ShiftAmt);
       break;
     }
 
     case llvm::Instruction::Trunc: {
       EVT TruncVT = TLI->getValueType(*DL, I->getType());
       unsigned TruncBitWidth = TruncVT.getSizeInBits();
-      auto TruncBits = APInt::getAllOnesValue(TruncBitWidth).zext(BitWidth);
-      DemandBits |= TruncBits;
+      DemandBits.setLowBits(TruncBitWidth);
       break;
     }
 
lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5350,7 +5350,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
         Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
                             DAG.getConstant(c2 - c1, DL, N1.getValueType()));
       } else {
-        Mask = Mask.lshr(c1 - c2);
+        Mask.lshrInPlace(c1 - c2);
         SDLoc DL(N);
         Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
                             DAG.getConstant(c1 - c2, DL, N1.getValueType()));
@@ -5660,7 +5660,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
                                        DAG.getConstant(ShiftAmt, DL0,
                                                        getShiftAmountTy(SmallVT)));
       AddToWorklist(SmallShift.getNode());
-      APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt);
+      APInt Mask = APInt::getLowBitsSet(OpSizeInBits, OpSizeInBits - ShiftAmt);
       SDLoc DL(N);
       return DAG.getNode(ISD::AND, DL, VT,
                          DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
@@ -8687,7 +8687,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
       for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
         APInt ThisVal = OpVal.trunc(DstBitSize);
         Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT));
-        OpVal = OpVal.lshr(DstBitSize);
+        OpVal.lshrInPlace(DstBitSize);
       }
 
       // For big endian targets, swap the order of the pieces of each element.
@@ -15143,9 +15143,9 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
 
       // Extract the sub element from the constant bit mask.
       if (DAG.getDataLayout().isBigEndian()) {
-        Bits = Bits.lshr((Split - SubIdx - 1) * NumSubBits);
+        Bits.lshrInPlace((Split - SubIdx - 1) * NumSubBits);
       } else {
-        Bits = Bits.lshr(SubIdx * NumSubBits);
+        Bits.lshrInPlace(SubIdx * NumSubBits);
       }
 
       if (Split > 1)
lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2330,8 +2330,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                        Depth + 1);
-      KnownZero = KnownZero.lshr(*ShAmt);
-      KnownOne  = KnownOne.lshr(*ShAmt);
+      KnownZero.lshrInPlace(*ShAmt);
+      KnownOne.lshrInPlace(*ShAmt);
       // High bits are known zero.
       KnownZero.setHighBits(ShAmt->getZExtValue());
     }
@@ -2340,12 +2340,12 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                        Depth + 1);
-      KnownZero = KnownZero.lshr(*ShAmt);
-      KnownOne  = KnownOne.lshr(*ShAmt);
+      KnownZero.lshrInPlace(*ShAmt);
+      KnownOne.lshrInPlace(*ShAmt);
       // If we know the value of the sign bit, then we know it is copied across
       // the high bits by the shift amount.
       APInt SignBit = APInt::getSignBit(BitWidth);
-      SignBit = SignBit.lshr(*ShAmt);  // Adjust to where it is now in the mask.
+      SignBit.lshrInPlace(*ShAmt);  // Adjust to where it is now in the mask.
       if (KnownZero.intersects(SignBit)) {
         KnownZero.setHighBits(ShAmt->getZExtValue());// New bits are known zero.
       } else if (KnownOne.intersects(SignBit)) {
lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -929,8 +929,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                              KnownZero, KnownOne, TLO, Depth+1))
       return true;
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
-    KnownZero = KnownZero.lshr(ShAmt);
-    KnownOne  = KnownOne.lshr(ShAmt);
+    KnownZero.lshrInPlace(ShAmt);
+    KnownOne.lshrInPlace(ShAmt);
 
     KnownZero.setHighBits(ShAmt);  // High bits known zero.
   }
@@ -970,8 +970,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                              KnownZero, KnownOne, TLO, Depth+1))
       return true;
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
-    KnownZero = KnownZero.lshr(ShAmt);
-    KnownOne  = KnownOne.lshr(ShAmt);
+    KnownZero.lshrInPlace(ShAmt);
+    KnownOne.lshrInPlace(ShAmt);
 
     // Handle the sign bit, adjusted to where it is now in the mask.
     APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
@@ -1207,7 +1207,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 
         APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
                                                OperandBitWidth - BitWidth);
-        HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);
+        HighBits.lshrInPlace(ShAmt->getZExtValue());
+        HighBits = HighBits.trunc(BitWidth);
 
         if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
           // None of the shifted in bits are needed.  Add a truncate of the
@@ -2055,7 +2056,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
         } else {
           ShiftBits = C1.countTrailingZeros();
         }
-        NewC = NewC.lshr(ShiftBits);
+        NewC.lshrInPlace(ShiftBits);
         if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
             isLegalICmpImmediate(NewC.getSExtValue())) {
           auto &DL = DAG.getDataLayout();
lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1580,7 +1580,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
         GenericValue Elt;
         Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
         Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
-        Elt.IntVal = Elt.IntVal.lshr(ShiftAmt);
+        Elt.IntVal.lshrInPlace(ShiftAmt);
         // it could be DstBitSize == SrcBitSize, so check it
         if (DstBitSize < SrcBitSize)
           Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
lib/IR/ConstantFold.cpp
@@ -223,7 +223,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
     APInt V = CI->getValue();
     if (ByteStart)
-      V = V.lshr(ByteStart*8);
+      V.lshrInPlace(ByteStart*8);
     V = V.trunc(ByteSize*8);
     return ConstantInt::get(CI->getContext(), V);
   }
lib/Support/APFloat.cpp
@@ -3442,7 +3442,7 @@ void IEEEFloat::toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
     // Ignore trailing binary zeros.
     int trailingZeros = significand.countTrailingZeros();
     exp += trailingZeros;
-    significand = significand.lshr(trailingZeros);
+    significand.lshrInPlace(trailingZeros);
 
     // Change the exponent from 2^e to 10^e.
     if (exp == 0) {
lib/Support/APInt.cpp
@@ -1134,8 +1134,8 @@ APInt APInt::ashr(unsigned shiftAmt) const {
 
 /// Logical right-shift this APInt by shiftAmt.
 /// @brief Logical right-shift function.
-APInt APInt::lshr(const APInt &shiftAmt) const {
-  return lshr((unsigned)shiftAmt.getLimitedValue(BitWidth));
+void APInt::lshrInPlace(const APInt &shiftAmt) {
+  lshrInPlace((unsigned)shiftAmt.getLimitedValue(BitWidth));
 }
 
 /// Logical right-shift this APInt by shiftAmt.
@@ -1149,7 +1149,7 @@ void APInt::lshrInPlace(unsigned ShiftAmt) {
     return;
   }
 
-  return tcShiftRight(pVal, getNumWords(), ShiftAmt);
+  tcShiftRight(pVal, getNumWords(), ShiftAmt);
 }
 
 /// Left-shift this APInt by shiftAmt.
@@ -2145,7 +2145,7 @@ void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix,
     while (Tmp != 0) {
       unsigned Digit = unsigned(Tmp.getRawData()[0]) & MaskAmt;
       Str.push_back(Digits[Digit]);
-      Tmp = Tmp.lshr(ShiftAmt);
+      Tmp.lshrInPlace(ShiftAmt);
     }
   } else {
     APInt divisor(Radix == 10? 4 : 8, Radix);
lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1865,7 +1865,7 @@ static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
     OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was at zero in the argument
-    OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
+    OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
   }
 
   UsefulBits &= OpUsefulBits;
@@ -1894,13 +1894,13 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
     Mask = Mask.shl(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
-    Mask = Mask.lshr(ShiftAmt);
+    Mask.lshrInPlace(ShiftAmt);
   } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
     // Shift Right
     // We do not handle AArch64_AM::ASR, because the sign will change the
     // number of useful bits
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
-    Mask = Mask.lshr(ShiftAmt);
+    Mask.lshrInPlace(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
     Mask = Mask.shl(ShiftAmt);
   } else
@@ -1954,7 +1954,7 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
   if (Op.getOperand(1) == Orig) {
     // Copy the bits from the result to the zero bits.
     Mask = ResultUsefulBits & OpUsefulBits;
-    Mask = Mask.lshr(LSB);
+    Mask.lshrInPlace(LSB);
   }
 
   if (Op.getOperand(0) == Orig)
lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -2004,7 +2004,7 @@ void NVPTXAsmPrinter::bufferAggregateConstant(const Constant *CPV,
     for (unsigned I = 0, E = DL.getTypeAllocSize(CPV->getType()); I < E; ++I) {
       uint8_t Byte = Val.getLoBits(8).getZExtValue();
       aggBuffer->addBytes(&Byte, 1, 1);
-      Val = Val.lshr(8);
+      Val.lshrInPlace(8);
     }
     return;
   }
lib/Target/X86/X86ISelLowering.cpp
@@ -8327,13 +8327,13 @@ static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
         Zeroable.setBit(i);
     else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
       APInt Val = Cst->getAPIntValue();
-      Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+      Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
       Val = Val.getLoBits(ScalarSizeInBits);
       if (Val == 0)
         Zeroable.setBit(i);
     } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
       APInt Val = Cst->getValueAPF().bitcastToAPInt();
-      Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+      Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
       Val = Val.getLoBits(ScalarSizeInBits);
       if (Val == 0)
         Zeroable.setBit(i);
@@ -26722,8 +26722,8 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       // Low bits are known zero.
       KnownZero.setLowBits(ShAmt);
     } else {
-      KnownZero = KnownZero.lshr(ShAmt);
-      KnownOne  = KnownOne.lshr(ShAmt);
+      KnownZero.lshrInPlace(ShAmt);
+      KnownOne.lshrInPlace(ShAmt);
       // High bits are known zero.
       KnownZero.setHighBits(ShAmt);
     }
@@ -31269,7 +31269,7 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
       else if (X86ISD::VSRAI == Opcode)
         Elt = Elt.ashr(ShiftImm);
       else
-        Elt = Elt.lshr(ShiftImm);
+        Elt.lshrInPlace(ShiftImm);
     }
     return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
   }
lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -839,7 +839,8 @@ static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
     // Length bits.
     if (CI0) {
       APInt Elt = CI0->getValue();
-      Elt = Elt.lshr(Index).zextOrTrunc(Length);
+      Elt.lshrInPlace(Index);
+      Elt = Elt.zextOrTrunc(Length);
       return LowConstantHighUndef(Elt.getZExtValue());
     }
 
@@ -1036,7 +1037,7 @@ static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
     // The PD variants uses bit 1 to select per-lane element index, so
     // shift down to convert to generic shuffle mask index.
     if (IsPD)
-      Index = Index.lshr(1);
+      Index.lshrInPlace(1);
 
     // The _256 variants are a bit trickier since the mask bits always index
     // into the corresponding 128 half. In order to convert to a generic
lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -370,7 +370,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
       MaskV <<= Op1C->getZExtValue();
     else {
       assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
-      MaskV = MaskV.lshr(Op1C->getZExtValue());
+      MaskV.lshrInPlace(Op1C->getZExtValue());
     }
 
     // shift1 & 0x00FF
lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -546,8 +546,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                              Depth + 1))
       return I;
     assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
-    KnownZero = KnownZero.lshr(ShiftAmt);
-    KnownOne  = KnownOne.lshr(ShiftAmt);
+    KnownZero.lshrInPlace(ShiftAmt);
+    KnownOne.lshrInPlace(ShiftAmt);
     if (ShiftAmt)
       KnownZero.setHighBits(ShiftAmt);  // high bits known zero.
   }
@@ -590,13 +590,13 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
     assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
     // Compute the new bits that are at the top now.
     APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
-    KnownZero = KnownZero.lshr(ShiftAmt);
-    KnownOne  = KnownOne.lshr(ShiftAmt);
+    KnownZero.lshrInPlace(ShiftAmt);
+    KnownOne.lshrInPlace(ShiftAmt);
 
     // Handle the sign bits.
     APInt SignBit(APInt::getSignBit(BitWidth));
     // Adjust to where it is now in the mask.
-    SignBit = SignBit.lshr(ShiftAmt);
+    SignBit.lshrInPlace(ShiftAmt);
 
     // If the input sign bit is known to be zero, or if none of the top bits
     // are demanded, turn this into an unsigned shift right.