diff --git a/llvm/include/llvm/Analysis/ValueTracking.h b/llvm/include/llvm/Analysis/ValueTracking.h
index 6c7fcefade37..cc588381727d 100644
--- a/llvm/include/llvm/Analysis/ValueTracking.h
+++ b/llvm/include/llvm/Analysis/ValueTracking.h
@@ -221,6 +221,11 @@ namespace llvm {
                                                AssumptionCache *AC,
                                                const Instruction *CxtI,
                                                const DominatorTree *DT);
+  OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
+                                               const DataLayout *DL,
+                                               AssumptionCache *AC,
+                                               const Instruction *CxtI,
+                                               const DominatorTree *DT);
 } // end namespace llvm
 
 #endif
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index bba2f9f2735c..d32b004cb77f 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -2729,3 +2729,32 @@ OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
 
   return OverflowResult::MayOverflow;
 }
+
+OverflowResult llvm::computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
+                                                   const DataLayout *DL,
+                                                   AssumptionCache *AC,
+                                                   const Instruction *CxtI,
+                                                   const DominatorTree *DT) {
+  bool LHSKnownNonNegative, LHSKnownNegative;
+  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, DL, /*Depth=*/0,
+                 AC, CxtI, DT);
+  if (LHSKnownNonNegative || LHSKnownNegative) {
+    bool RHSKnownNonNegative, RHSKnownNegative;
+    ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, DL, /*Depth=*/0,
+                   AC, CxtI, DT);
+
+    if (LHSKnownNegative && RHSKnownNegative) {
+      // The sign bit is set in both cases: this MUST overflow.
+      // Create a simple add instruction, and insert it into the struct.
+      return OverflowResult::AlwaysOverflows;
+    }
+
+    if (LHSKnownNonNegative && RHSKnownNonNegative) {
+      // The sign bit is clear in both cases: this CANNOT overflow.
+      // Create a simple add instruction, and insert it into the struct.
+      return OverflowResult::NeverOverflows;
+    }
+  }
+
+  return OverflowResult::MayOverflow;
+}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombine.h b/llvm/lib/Transforms/InstCombine/InstCombine.h
index 4c8e8d4e59c7..3c3c13551937 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombine.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombine.h
@@ -282,7 +282,6 @@ private:
                                  bool DoXform = true);
   Instruction *transformSExtICmp(ICmpInst *ICI, Instruction &CI);
   bool WillNotOverflowSignedAdd(Value *LHS, Value *RHS, Instruction *CxtI);
-  bool WillNotOverflowUnsignedAdd(Value *LHS, Value *RHS, Instruction *CxtI);
   bool WillNotOverflowSignedSub(Value *LHS, Value *RHS, Instruction *CxtI);
   bool WillNotOverflowUnsignedSub(Value *LHS, Value *RHS, Instruction *CxtI);
   bool WillNotOverflowSignedMul(Value *LHS, Value *RHS, Instruction *CxtI);
@@ -391,6 +390,10 @@ public:
                                              const Instruction *CxtI) {
     return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, AC, CxtI, DT);
   }
+  OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
+                                               const Instruction *CxtI) {
+    return llvm::computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, CxtI, DT);
+  }
 
 private:
   /// SimplifyAssociativeOrCommutative - This performs a few simplifications for
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index fbec98dbec85..6d20384e5d17 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -937,22 +937,6 @@ bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
   return false;
 }
 
-/// WillNotOverflowUnsignedAdd - Return true if we can prove that:
-///    (zext (add LHS, RHS))  === (add (zext LHS), (zext RHS))
-bool InstCombiner::WillNotOverflowUnsignedAdd(Value *LHS, Value *RHS,
-                                              Instruction *CxtI) {
-  // There are different heuristics we can use for this.  Here is a simple one.
-  // If the sign bit of LHS and that of RHS are both zero, no unsigned wrap.
-  bool LHSKnownNonNegative, LHSKnownNegative;
-  bool RHSKnownNonNegative, RHSKnownNegative;
-  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, /*Depth=*/0, CxtI);
-  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, /*Depth=*/0, CxtI);
-  if (LHSKnownNonNegative && RHSKnownNonNegative)
-    return true;
-
-  return false;
-}
-
 /// \brief Return true if we can prove that:
 ///    (sub LHS, RHS)  === (sub nsw LHS, RHS)
 /// This basically requires proving that the add in the original type would not
@@ -1327,7 +1311,9 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
     Changed = true;
     I.setHasNoSignedWrap(true);
   }
-  if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedAdd(LHS, RHS, &I)) {
+  if (!I.hasNoUnsignedWrap() &&
+      computeOverflowForUnsignedAdd(LHS, RHS, &I) ==
+          OverflowResult::NeverOverflows) {
     Changed = true;
     I.setHasNoUnsignedWrap(true);
   }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index c42202a2fa6c..dab2c4b47ad6 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -352,33 +352,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
   case Intrinsic::uadd_with_overflow: {
     Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
-    IntegerType *IT = cast<IntegerType>(II->getArgOperand(0)->getType());
-    uint32_t BitWidth = IT->getBitWidth();
-    APInt LHSKnownZero(BitWidth, 0);
-    APInt LHSKnownOne(BitWidth, 0);
-    computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, II);
-    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
-    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];
-
-    if (LHSKnownNegative || LHSKnownPositive) {
-      APInt RHSKnownZero(BitWidth, 0);
-      APInt RHSKnownOne(BitWidth, 0);
-      computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, II);
-      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
-      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
-      if (LHSKnownNegative && RHSKnownNegative) {
-        // The sign bit is set in both cases: this MUST overflow.
-        // Create a simple add instruction, and insert it into the struct.
-        return CreateOverflowTuple(II, Builder->CreateAdd(LHS, RHS), true,
-                                   /*ReUseName*/true);
-      }
-
-      if (LHSKnownPositive && RHSKnownPositive) {
-        // The sign bit is clear in both cases: this CANNOT overflow.
-        // Create a simple add instruction, and insert it into the struct.
-        return CreateOverflowTuple(II, Builder->CreateNUWAdd(LHS, RHS), false);
-      }
-    }
+    OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, II);
+    if (OR == OverflowResult::NeverOverflows)
+      return CreateOverflowTuple(II, Builder->CreateNUWAdd(LHS, RHS), false);
+    if (OR == OverflowResult::AlwaysOverflows)
+      return CreateOverflowTuple(II, Builder->CreateAdd(LHS, RHS), true);
  }
   // FALL THROUGH uadd into sadd
   case Intrinsic::sadd_with_overflow:
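
Not part of the patch: the sketch below illustrates how code outside InstCombine might consume the newly exported llvm::computeOverflowForUnsignedAdd, mirroring what InstCombiner::visitAdd does with the result after this change. The helper name addNUWIfProvable and its parameter list are hypothetical, chosen only for the example; the analysis entry point and the OverflowResult values are the ones added in the hunks above.

// Hypothetical, illustrative caller -- not included in the patch.
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sets the nuw flag on an unsigned add when computeOverflowForUnsignedAdd
// proves the addition can never wrap; MayOverflow and AlwaysOverflows
// leave the instruction untouched.
static bool addNUWIfProvable(BinaryOperator *Add, const DataLayout *DL,
                             AssumptionCache *AC, const DominatorTree *DT) {
  Value *LHS = Add->getOperand(0), *RHS = Add->getOperand(1);
  if (computeOverflowForUnsignedAdd(LHS, RHS, DL, AC, Add, DT) ==
      OverflowResult::NeverOverflows) {
    Add->setHasNoUnsignedWrap(true);
    return true;
  }
  return false;
}

Inside InstCombine itself, the convenience wrapper added to InstCombine.h supplies DL, AC, and DT automatically, which is why visitAdd and visitCallInst can call computeOverflowForUnsignedAdd with only the operands and a context instruction.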