From fc947bcfba2fc8bc9cc4f8750d1d74c5bf24ee08 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Tue, 18 Apr 2017 17:14:21 +0000
Subject: [PATCH] [APInt] Use lshrInPlace to replace lshr where possible

This patch uses lshrInPlace to replace code where the object that lshr
is called on is being overwritten with the result.

This adds an lshrInPlace(const APInt &) version as well.

Differential Revision: https://reviews.llvm.org/D32155

llvm-svn: 300566
---
 llvm/include/llvm/ADT/APInt.h                 |  9 +++++++-
 llvm/lib/Analysis/ScalarEvolution.cpp         |  2 +-
 llvm/lib/Analysis/ValueTracking.cpp           | 21 +++++++++++++-------
 llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp    |  2 +-
 llvm/lib/CodeGen/CodeGenPrepare.cpp           |  6 ++----
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 10 +++++-----
 .../lib/CodeGen/SelectionDAG/SelectionDAG.cpp | 10 +++++-----
 .../CodeGen/SelectionDAG/TargetLowering.cpp   | 13 +++++++------
 .../ExecutionEngine/Interpreter/Execution.cpp |  2 +-
 llvm/lib/IR/ConstantFold.cpp                  |  2 +-
 llvm/lib/Support/APFloat.cpp                  |  2 +-
 llvm/lib/Support/APInt.cpp                    |  8 ++++----
 .../Target/AArch64/AArch64ISelDAGToDAG.cpp    |  8 ++++----
 llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp     |  2 +-
 llvm/lib/Target/X86/X86ISelLowering.cpp       | 10 +++++-----
 .../InstCombine/InstCombineCalls.cpp          |  5 +++--
 .../InstCombine/InstCombineShifts.cpp         |  2 +-
 .../InstCombineSimplifyDemanded.cpp           | 10 +++++-----
 18 files changed, 68 insertions(+), 56 deletions(-)

diff --git a/llvm/include/llvm/ADT/APInt.h b/llvm/include/llvm/ADT/APInt.h
index de097811ceba..11fb489eaa72 100644
--- a/llvm/include/llvm/ADT/APInt.h
+++ b/llvm/include/llvm/ADT/APInt.h
@@ -914,7 +914,14 @@ public:
   /// \brief Logical right-shift function.
   ///
   /// Logical right-shift this APInt by shiftAmt.
-  APInt lshr(const APInt &shiftAmt) const;
+  APInt lshr(const APInt &ShiftAmt) const {
+    APInt R(*this);
+    R.lshrInPlace(ShiftAmt);
+    return R;
+  }
+
+  /// Logical right-shift this APInt by ShiftAmt in place.
+  void lshrInPlace(const APInt &ShiftAmt);
 
   /// \brief Left-shift function.
   ///
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 0534909a5e41..278b65c68f8c 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -1093,7 +1093,7 @@ static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
     APInt Mult(W, i);
     unsigned TwoFactors = Mult.countTrailingZeros();
     T += TwoFactors;
-    Mult = Mult.lshr(TwoFactors);
+    Mult.lshrInPlace(TwoFactors);
     OddFactorial *= Mult;
   }
 
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 2079c1b33aea..974ec02598f3 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -661,8 +661,10 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
       computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
       // For those bits in RHS that are known, we can propagate them to known
       // bits in V shifted to the right by C.
-      KnownZero |= RHSKnownZero.lshr(C->getZExtValue());
-      KnownOne |= RHSKnownOne.lshr(C->getZExtValue());
+      RHSKnownZero.lshrInPlace(C->getZExtValue());
+      KnownZero |= RHSKnownZero;
+      RHSKnownOne.lshrInPlace(C->getZExtValue());
+      KnownOne |= RHSKnownOne;
     // assume(~(v << c) = a)
     } else if (match(Arg, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                    m_Value(A))) &&
@@ -672,8 +674,10 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
      computeKnownBits(A, RHSKnownZero, RHSKnownOne, Depth+1, Query(Q, I));
       // For those bits in RHS that are known, we can propagate them inverted
       // to known bits in V shifted to the right by C.
-      KnownZero |= RHSKnownOne.lshr(C->getZExtValue());
-      KnownOne |= RHSKnownZero.lshr(C->getZExtValue());
+      RHSKnownOne.lshrInPlace(C->getZExtValue());
+      KnownZero |= RHSKnownOne;
+      RHSKnownZero.lshrInPlace(C->getZExtValue());
+      KnownOne |= RHSKnownZero;
     // assume(v >> c = a)
     } else if (match(Arg,
                      m_c_ICmp(Pred, m_CombineOr(m_LShr(m_V, m_ConstantInt(C)),
@@ -1111,10 +1115,11 @@ static void computeKnownBitsFromOperator(const Operator *I, APInt &KnownZero,
     }
   case Instruction::LShr: {
     // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
-    auto KZF = [BitWidth](const APInt &KnownZero, unsigned ShiftAmt) {
-      return KnownZero.lshr(ShiftAmt) |
-             // High bits known zero.
-             APInt::getHighBitsSet(BitWidth, ShiftAmt);
+    auto KZF = [](const APInt &KnownZero, unsigned ShiftAmt) {
+      APInt KZResult = KnownZero.lshr(ShiftAmt);
+      // High bits known zero.
+      KZResult.setHighBits(ShiftAmt);
+      return KZResult;
     };
 
     auto KOF = [](const APInt &KnownOne, unsigned ShiftAmt) {
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index b18dd7d00676..028c79f3ab6d 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2246,7 +2246,7 @@ static void emitGlobalConstantLargeInt(const ConstantInt *CI, AsmPrinter &AP) {
       // chu[nk1 chu][nk2 chu] ... [nkN-1 chunkN]
       ExtraBits = Realigned.getRawData()[0] &
                   (((uint64_t)-1) >> (64 - ExtraBitsSize));
-      Realigned = Realigned.lshr(ExtraBitsSize);
+      Realigned.lshrInPlace(ExtraBitsSize);
     } else
       ExtraBits = Realigned.getRawData()[BitWidth / 64];
   }
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index aa0be910e3be..c862cfd28add 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -5065,16 +5065,14 @@ bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
       if (!ShlC)
         return false;
       uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
-      auto ShlDemandBits = APInt::getAllOnesValue(BitWidth).lshr(ShiftAmt);
-      DemandBits |= ShlDemandBits;
+      DemandBits.setLowBits(BitWidth - ShiftAmt);
       break;
     }
 
     case llvm::Instruction::Trunc: {
       EVT TruncVT = TLI->getValueType(*DL, I->getType());
       unsigned TruncBitWidth = TruncVT.getSizeInBits();
-      auto TruncBits = APInt::getAllOnesValue(TruncBitWidth).zext(BitWidth);
-      DemandBits |= TruncBits;
+      DemandBits.setLowBits(TruncBitWidth);
       break;
     }
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index c947a36e89f9..a24030acbbe9 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -5350,7 +5350,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
         Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
                             DAG.getConstant(c2 - c1, DL, N1.getValueType()));
       } else {
-        Mask = Mask.lshr(c1 - c2);
+        Mask.lshrInPlace(c1 - c2);
         SDLoc DL(N);
         Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0),
                             DAG.getConstant(c1 - c2, DL, N1.getValueType()));
@@ -5660,7 +5660,7 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
                          DAG.getConstant(ShiftAmt, DL0,
                                          getShiftAmountTy(SmallVT)));
       AddToWorklist(SmallShift.getNode());
-      APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt);
+      APInt Mask = APInt::getLowBitsSet(OpSizeInBits, OpSizeInBits - ShiftAmt);
       SDLoc DL(N);
       return DAG.getNode(ISD::AND, DL, VT,
                          DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift),
@@ -8687,7 +8687,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
       for (unsigned j = 0; j != NumOutputsPerInput; ++j) {
         APInt ThisVal = OpVal.trunc(DstBitSize);
         Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT));
-        OpVal = OpVal.lshr(DstBitSize);
+        OpVal.lshrInPlace(DstBitSize);
       }
 
       // For big endian targets, swap the order of the pieces of each element.
@@ -15143,9 +15143,9 @@ SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) {
 
         // Extract the sub element from the constant bit mask.
         if (DAG.getDataLayout().isBigEndian()) {
-          Bits = Bits.lshr((Split - SubIdx - 1) * NumSubBits);
+          Bits.lshrInPlace((Split - SubIdx - 1) * NumSubBits);
         } else {
-          Bits = Bits.lshr(SubIdx * NumSubBits);
+          Bits.lshrInPlace(SubIdx * NumSubBits);
         }
 
         if (Split > 1)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 003ea5030bfc..8c677d901e44 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2330,8 +2330,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
      computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                        Depth + 1);
-      KnownZero = KnownZero.lshr(*ShAmt);
-      KnownOne = KnownOne.lshr(*ShAmt);
+      KnownZero.lshrInPlace(*ShAmt);
+      KnownOne.lshrInPlace(*ShAmt);
       // High bits are known zero.
       KnownZero.setHighBits(ShAmt->getZExtValue());
     }
@@ -2340,12 +2340,12 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     if (const APInt *ShAmt = getValidShiftAmountConstant(Op)) {
       computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, DemandedElts,
                        Depth + 1);
-      KnownZero = KnownZero.lshr(*ShAmt);
-      KnownOne = KnownOne.lshr(*ShAmt);
+      KnownZero.lshrInPlace(*ShAmt);
+      KnownOne.lshrInPlace(*ShAmt);
       // If we know the value of the sign bit, then we know it is copied across
       // the high bits by the shift amount.
       APInt SignBit = APInt::getSignBit(BitWidth);
-      SignBit = SignBit.lshr(*ShAmt); // Adjust to where it is now in the mask.
+      SignBit.lshrInPlace(*ShAmt); // Adjust to where it is now in the mask.
       if (KnownZero.intersects(SignBit)) {
         KnownZero.setHighBits(ShAmt->getZExtValue());// New bits are known zero.
       } else if (KnownOne.intersects(SignBit)) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 2756e276c6a9..ce892eb3ec12 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -929,8 +929,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                              KnownZero, KnownOne, TLO, Depth+1))
       return true;
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
-    KnownZero = KnownZero.lshr(ShAmt);
-    KnownOne = KnownOne.lshr(ShAmt);
+    KnownZero.lshrInPlace(ShAmt);
+    KnownOne.lshrInPlace(ShAmt);
 
     KnownZero.setHighBits(ShAmt); // High bits known zero.
   }
@@ -970,8 +970,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
                              KnownZero, KnownOne, TLO, Depth+1))
       return true;
     assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
-    KnownZero = KnownZero.lshr(ShAmt);
-    KnownOne = KnownOne.lshr(ShAmt);
+    KnownZero.lshrInPlace(ShAmt);
+    KnownOne.lshrInPlace(ShAmt);
 
     // Handle the sign bit, adjusted to where it is now in the mask.
     APInt SignBit = APInt::getSignBit(BitWidth).lshr(ShAmt);
@@ -1207,7 +1207,8 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
 
       APInt HighBits = APInt::getHighBitsSet(OperandBitWidth,
                                              OperandBitWidth - BitWidth);
-      HighBits = HighBits.lshr(ShAmt->getZExtValue()).trunc(BitWidth);
+      HighBits.lshrInPlace(ShAmt->getZExtValue());
+      HighBits = HighBits.trunc(BitWidth);
 
       if (ShAmt->getZExtValue() < BitWidth && !(HighBits & NewMask)) {
         // None of the shifted in bits are needed.  Add a truncate of the
         // shifted value.
@@ -2055,7 +2056,7 @@ SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
       } else {
         ShiftBits = C1.countTrailingZeros();
       }
-      NewC = NewC.lshr(ShiftBits);
+      NewC.lshrInPlace(ShiftBits);
       if (ShiftBits && NewC.getMinSignedBits() <= 64 &&
          isLegalICmpImmediate(NewC.getSExtValue())) {
         auto &DL = DAG.getDataLayout();
diff --git a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
index e29e9fc2c702..10b4e98b6079 100644
--- a/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
+++ b/llvm/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -1580,7 +1580,7 @@ GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
         GenericValue Elt;
         Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
         Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
-        Elt.IntVal = Elt.IntVal.lshr(ShiftAmt);
+        Elt.IntVal.lshrInPlace(ShiftAmt);
         // it could be DstBitSize == SrcBitSize, so check it
         if (DstBitSize < SrcBitSize)
           Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
diff --git a/llvm/lib/IR/ConstantFold.cpp b/llvm/lib/IR/ConstantFold.cpp
index bba230677ebf..80b117015ede 100644
--- a/llvm/lib/IR/ConstantFold.cpp
+++ b/llvm/lib/IR/ConstantFold.cpp
@@ -223,7 +223,7 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
     APInt V = CI->getValue();
     if (ByteStart)
-      V = V.lshr(ByteStart*8);
+      V.lshrInPlace(ByteStart*8);
     V = V.trunc(ByteSize*8);
     return ConstantInt::get(CI->getContext(), V);
   }
diff --git a/llvm/lib/Support/APFloat.cpp b/llvm/lib/Support/APFloat.cpp
index 9778628911cd..c4c892f0352a 100644
--- a/llvm/lib/Support/APFloat.cpp
+++ b/llvm/lib/Support/APFloat.cpp
@@ -3442,7 +3442,7 @@ void IEEEFloat::toString(SmallVectorImpl<char> &Str, unsigned FormatPrecision,
     // Ignore trailing binary zeros.
     int trailingZeros = significand.countTrailingZeros();
     exp += trailingZeros;
-    significand = significand.lshr(trailingZeros);
+    significand.lshrInPlace(trailingZeros);
 
     // Change the exponent from 2^e to 10^e.
     if (exp == 0) {
diff --git a/llvm/lib/Support/APInt.cpp b/llvm/lib/Support/APInt.cpp
index 5ac98e1c127f..8b23179b44b2 100644
--- a/llvm/lib/Support/APInt.cpp
+++ b/llvm/lib/Support/APInt.cpp
@@ -1134,8 +1134,8 @@ APInt APInt::ashr(unsigned shiftAmt) const {
 
 /// Logical right-shift this APInt by shiftAmt.
 /// @brief Logical right-shift function.
-APInt APInt::lshr(const APInt &shiftAmt) const {
-  return lshr((unsigned)shiftAmt.getLimitedValue(BitWidth));
+void APInt::lshrInPlace(const APInt &shiftAmt) {
+  lshrInPlace((unsigned)shiftAmt.getLimitedValue(BitWidth));
 }
 
 /// Logical right-shift this APInt by shiftAmt.
@@ -1149,7 +1149,7 @@ void APInt::lshrInPlace(unsigned ShiftAmt) {
     return;
   }
 
-  return tcShiftRight(pVal, getNumWords(), ShiftAmt);
+  tcShiftRight(pVal, getNumWords(), ShiftAmt);
 }
 
 /// Left-shift this APInt by shiftAmt.
@@ -2145,7 +2145,7 @@ void APInt::toString(SmallVectorImpl<char> &Str, unsigned Radix,
     while (Tmp != 0) {
       unsigned Digit = unsigned(Tmp.getRawData()[0]) & MaskAmt;
       Str.push_back(Digits[Digit]);
-      Tmp = Tmp.lshr(ShiftAmt);
+      Tmp.lshrInPlace(ShiftAmt);
     }
   } else {
     APInt divisor(Radix == 10? 4 : 8, Radix);
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index ae01ea477bb9..7141e77fcd25 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -1865,7 +1865,7 @@ static void getUsefulBitsFromBitfieldMoveOpd(SDValue Op, APInt &UsefulBits,
     OpUsefulBits = OpUsefulBits.shl(OpUsefulBits.getBitWidth() - Imm);
     getUsefulBits(Op, OpUsefulBits, Depth + 1);
     // The interesting part was at zero in the argument
-    OpUsefulBits = OpUsefulBits.lshr(OpUsefulBits.getBitWidth() - Imm);
+    OpUsefulBits.lshrInPlace(OpUsefulBits.getBitWidth() - Imm);
   }
 
   UsefulBits &= OpUsefulBits;
@@ -1894,13 +1894,13 @@ static void getUsefulBitsFromOrWithShiftedReg(SDValue Op, APInt &UsefulBits,
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
     Mask = Mask.shl(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
-    Mask = Mask.lshr(ShiftAmt);
+    Mask.lshrInPlace(ShiftAmt);
   } else if (AArch64_AM::getShiftType(ShiftTypeAndValue) == AArch64_AM::LSR) {
     // Shift Right
     // We do not handle AArch64_AM::ASR, because the sign will change the
     // number of useful bits
     uint64_t ShiftAmt = AArch64_AM::getShiftValue(ShiftTypeAndValue);
-    Mask = Mask.lshr(ShiftAmt);
+    Mask.lshrInPlace(ShiftAmt);
     getUsefulBits(Op, Mask, Depth + 1);
     Mask = Mask.shl(ShiftAmt);
   } else
@@ -1954,7 +1954,7 @@ static void getUsefulBitsFromBFM(SDValue Op, SDValue Orig, APInt &UsefulBits,
   if (Op.getOperand(1) == Orig) {
     // Copy the bits from the result to the zero bits.
     Mask = ResultUsefulBits & OpUsefulBits;
-    Mask = Mask.lshr(LSB);
+    Mask.lshrInPlace(LSB);
   }
 
   if (Op.getOperand(0) == Orig)
diff --git a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
index 21e25de80dc7..ba28cd83278b 100644
--- a/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -2004,7 +2004,7 @@ void NVPTXAsmPrinter::bufferAggregateConstant(const Constant *CPV,
     for (unsigned I = 0, E = DL.getTypeAllocSize(CPV->getType()); I < E; ++I) {
       uint8_t Byte = Val.getLoBits(8).getZExtValue();
       aggBuffer->addBytes(&Byte, 1, 1);
-      Val = Val.lshr(8);
+      Val.lshrInPlace(8);
     }
     return;
   }
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6bf3672c3c08..d7e03ab4a5a6 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -8327,13 +8327,13 @@ static APInt computeZeroableShuffleElements(ArrayRef<int> Mask,
       Zeroable.setBit(i);
     else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
       APInt Val = Cst->getAPIntValue();
-      Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+      Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
       Val = Val.getLoBits(ScalarSizeInBits);
       if (Val == 0)
         Zeroable.setBit(i);
     } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
       APInt Val = Cst->getValueAPF().bitcastToAPInt();
-      Val = Val.lshr((M % Scale) * ScalarSizeInBits);
+      Val.lshrInPlace((M % Scale) * ScalarSizeInBits);
       Val = Val.getLoBits(ScalarSizeInBits);
       if (Val == 0)
         Zeroable.setBit(i);
@@ -26722,8 +26722,8 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
       // Low bits are known zero.
       KnownZero.setLowBits(ShAmt);
     } else {
-      KnownZero = KnownZero.lshr(ShAmt);
-      KnownOne = KnownOne.lshr(ShAmt);
+      KnownZero.lshrInPlace(ShAmt);
+      KnownOne.lshrInPlace(ShAmt);
       // High bits are known zero.
       KnownZero.setHighBits(ShAmt);
     }
@@ -31269,7 +31269,7 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
       else if (X86ISD::VSRAI == Opcode)
         Elt = Elt.ashr(ShiftImm);
       else
-        Elt = Elt.lshr(ShiftImm);
+        Elt.lshrInPlace(ShiftImm);
     }
 
     return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
   }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 69484f47223f..7ed999fa70bc 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -839,7 +839,8 @@ static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0,
     // Length bits.
     if (CI0) {
       APInt Elt = CI0->getValue();
-      Elt = Elt.lshr(Index).zextOrTrunc(Length);
+      Elt.lshrInPlace(Index);
+      Elt = Elt.zextOrTrunc(Length);
       return LowConstantHighUndef(Elt.getZExtValue());
     }
 
@@ -1036,7 +1037,7 @@ static Value *simplifyX86vpermilvar(const IntrinsicInst &II,
     // The PD variants uses bit 1 to select per-lane element index, so
     // shift down to convert to generic shuffle mask index.
     if (IsPD)
-      Index = Index.lshr(1);
+      Index.lshrInPlace(1);
 
     // The _256 variants are a bit trickier since the mask bits always index
     // into the corresponding 128 half. In order to convert to a generic
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 9aa679c60e47..cb6145c3293f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -370,7 +370,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
         MaskV <<= Op1C->getZExtValue();
       else {
        assert(I.getOpcode() == Instruction::LShr && "Unknown logical shift");
-        MaskV = MaskV.lshr(Op1C->getZExtValue());
+        MaskV.lshrInPlace(Op1C->getZExtValue());
       }
 
       // shift1 & 0x00FF
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index 9cd19eeb8b89..2f66bc7b8d67 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -546,8 +546,8 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
                              Depth + 1))
       return I;
     assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
-    KnownZero = KnownZero.lshr(ShiftAmt);
-    KnownOne = KnownOne.lshr(ShiftAmt);
+    KnownZero.lshrInPlace(ShiftAmt);
+    KnownOne.lshrInPlace(ShiftAmt);
     if (ShiftAmt)
       KnownZero.setHighBits(ShiftAmt); // high bits known zero.
   }
@@ -590,13 +590,13 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
     assert(!(KnownZero & KnownOne) && "Bits known to be one AND zero?");
     // Compute the new bits that are at the top now.
     APInt HighBits(APInt::getHighBitsSet(BitWidth, ShiftAmt));
-    KnownZero = KnownZero.lshr(ShiftAmt);
-    KnownOne = KnownOne.lshr(ShiftAmt);
+    KnownZero.lshrInPlace(ShiftAmt);
+    KnownOne.lshrInPlace(ShiftAmt);
 
     // Handle the sign bits.
     APInt SignBit(APInt::getSignBit(BitWidth));
     // Adjust to where it is now in the mask.
-    SignBit = SignBit.lshr(ShiftAmt);
+    SignBit.lshrInPlace(ShiftAmt);
 
     // If the input sign bit is known to be zero, or if none of the top bits
     // are demanded, turn this into an unsigned shift right.
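
-- 
For reference, the rewrite this patch performs can be summarized with a
minimal, self-contained sketch. This sketch is not part of the patch: the
main() driver and the example values are invented for illustration, and it
assumes the post-patch llvm/ADT/APInt.h is on the include path.

  #include "llvm/ADT/APInt.h"
  #include <cassert>

  using llvm::APInt;

  int main() {
    APInt Mask(128, 0xFF00u);

    // Before this patch: lshr() materializes a temporary APInt (a heap
    // allocation once the width exceeds 64 bits) that is immediately
    // copied back over Mask.
    Mask = Mask.lshr(4);

    // After this patch: the shift mutates Mask's own words directly,
    // with no temporary.
    Mask.lshrInPlace(4);
    assert(Mask == 0xFFu);

    // The new lshrInPlace(const APInt &) overload clamps the shift amount
    // to the bit width and forwards to the unsigned overload, which lets
    // lshr(const APInt &) become a thin copy-then-shift wrapper in APInt.h.
    APInt Amt(128, 8);
    Mask.lshrInPlace(Amt);
    assert(Mask == 0);

    return 0;
  }

The patch applies this rewrite mechanically: every self-assignment of the
form X = X.lshr(N) becomes X.lshrInPlace(N), while shifts whose result lands
in a different object (e.g. APInt::getSignBit(BitWidth).lshr(ShAmt) in
TargetLowering.cpp) still go through lshr().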